diff --git a/.gitignore b/.gitignore
index 32b3a22421b5d27a208a4ca65b2ba110b9176c61..b833f755881f1099327cc836dee1646b12d218ce 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
 *ipynb
+*_pycache_*
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..b5685772804c8af4235a8504dc6752bfc9ae5d1d
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,13 @@
+.PHONY: style quality
+
+
+style:
+	python -m black --line-length 119 .
+	python -m isort .
+	ruff check --fix .
+
+
+quality:
+	python -m black --check --line-length 119 .
+	python -m isort --check-only .
+	ruff check .
diff --git a/README.md b/README.md
index 24053ad6341c3c2df3d4d2bf62d98e3a83a1d53f..969563ca75efa4043495985dd87ad767354be8de 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,16 @@
 ---
-title: MOE LLM GPU Poor Leaderboard
-emoji: 👁
-colorFrom: indigo
-colorTo: red
+title: MOE-LLM-GPU-POOR_LEADERBOARD
+emoji: 🔥
+colorFrom: green
+colorTo: indigo
 sdk: gradio
-sdk_version: 4.21.0
+sdk_version: 4.9.0
 app_file: app.py
-pinned: false
+pinned: true
 license: apache-2.0
+fullWidth: true
+tags:
+  - leaderboard
 ---
 
 Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
diff --git a/app.py b/app.py
new file mode 100755
index 0000000000000000000000000000000000000000..3e5d1d1702ec66f9adc8998e68e3019a36fcd266
--- /dev/null
+++ b/app.py
@@ -0,0 +1,366 @@
+#!/usr/bin/env python
+
+import os
+import datetime
+import socket
+
+import gradio as gr
+import pandas as pd
+
+from apscheduler.schedulers.background import BackgroundScheduler
+
+from huggingface_hub import snapshot_download
+
+from src.display.about import (
+    CITATION_BUTTON_LABEL,
+    CITATION_BUTTON_TEXT,
+    EVALUATION_QUEUE_TEXT,
+    INTRODUCTION_TEXT,
+    LLM_BENCHMARKS_TEXT,
+    LLM_BENCHMARKS_DETAILS,
+    FAQ_TEXT,
+    TITLE
+)
+
+from src.display.css_html_js import custom_css
+
+from src.display.utils import (
+    BENCHMARK_COLS,
+    COLS,
+    EVAL_COLS,
+    EVAL_TYPES,
+    NUMERIC_INTERVALS,
+    TYPES,
+    AutoEvalColumn,
+    ModelType,
+    fields,
+    WeightType,
+    Precision
+)
+
+from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, H4_TOKEN, IS_PUBLIC, QUEUE_REPO, REPO_ID, RESULTS_REPO
+from src.populate import get_evaluation_queue_df, get_leaderboard_df
+from src.submission.submit import add_new_eval
+from src.utils import get_dataset_summary_table
+
+
+def ui_snapshot_download(repo_id, local_dir, repo_type, tqdm_class, etag_timeout):
+    try:
+        print(local_dir)
+        snapshot_download(repo_id=repo_id, local_dir=local_dir, repo_type=repo_type, tqdm_class=tqdm_class, etag_timeout=etag_timeout)
+    except Exception as e:
+        # Surface the failure before falling back to a Space restart
+        print(f"ui_snapshot_download failed: {e}")
+        restart_space()
+
+
+def restart_space():
+    API.restart_space(repo_id=REPO_ID, token=H4_TOKEN)
+
+
+def init_space():
+    dataset_df = get_dataset_summary_table(file_path='blog/Hallucination-Leaderboard-Summary.csv')
+
+    if socket.gethostname() not in {'neuromancer'}:
+        # sync model_type with open-llm-leaderboard
+        ui_snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
+        ui_snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30)
+    raw_data, original_df = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, "", COLS, BENCHMARK_COLS)
+
+    finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = get_evaluation_queue_df(EVAL_REQUESTS_PATH, EVAL_COLS)
+    return dataset_df, original_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df
+
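A minimal, self-contained sketch of the guard pattern `ui_snapshot_download` implements above: try the snapshot, and on any error run a fallback instead of crashing the UI process (in app.py, the fallback restarts the Space). The repo id and callback here are hypothetical stand-ins, not values from the repo:

```python
from huggingface_hub import snapshot_download

def guarded_download(repo_id: str, local_dir: str, on_failure) -> None:
    # Same shape as ui_snapshot_download above: any download error
    # triggers the fallback rather than propagating to the caller.
    try:
        snapshot_download(repo_id=repo_id, local_dir=local_dir,
                          repo_type="dataset", etag_timeout=30)
    except Exception as err:
        print(f"snapshot_download failed: {err}")
        on_failure()

# Hypothetical usage, mirroring init_space():
# guarded_download("hallucinations-leaderboard/requests", "./eval-queue",
#                  on_failure=lambda: print("would restart the Space here"))
```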
+dataset_df, original_df, finished_eval_queue_df, running_eval_queue_df, pending_eval_queue_df = init_space() +leaderboard_df = original_df.copy() + + +# Searching and filtering +def update_table(hidden_df: pd.DataFrame, + columns: list, + type_query: list, + precision_query: list, + size_query: list, + query: str): + filtered_df = filter_models(hidden_df, type_query, size_query, precision_query) + filtered_df = filter_queries(query, filtered_df) + df = select_columns(filtered_df, columns) + return df + + +def search_table(df: pd.DataFrame, query: str) -> pd.DataFrame: + return df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))] + + +def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame: + # always_here_cols = [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name] + + always_here_cols = [c.name for c in fields(AutoEvalColumn) if c.never_hidden] + dummy_col = [AutoEvalColumn.dummy.name] + + # We use COLS to maintain sorting + filtered_df = df[ + # always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name] + always_here_cols + [c for c in COLS if c in df.columns and c in columns] + dummy_col + ] + return filtered_df + + +def filter_queries(query: str, filtered_df: pd.DataFrame): + final_df = [] + if query != "": + queries = [q.strip() for q in query.split(";")] + for _q in queries: + _q = _q.strip() + if _q != "": + temp_filtered_df = search_table(filtered_df, _q) + if len(temp_filtered_df) > 0: + final_df.append(temp_filtered_df) + if len(final_df) > 0: + filtered_df = pd.concat(final_df) + subset = [AutoEvalColumn.model.name, AutoEvalColumn.precision.name, AutoEvalColumn.revision.name] + filtered_df = filtered_df.drop_duplicates(subset=subset) + return filtered_df + + +def filter_models(df: pd.DataFrame, + type_query: list, + size_query: list, + precision_query: list) -> pd.DataFrame: + # Show all models + filtered_df = df + + type_emoji = [t[0] for t in type_query] + filtered_df = filtered_df.loc[df[AutoEvalColumn.model_type_symbol.name].isin(type_emoji)] + filtered_df = filtered_df.loc[df[AutoEvalColumn.precision.name].isin(precision_query + ["None"])] + + numeric_interval = pd.IntervalIndex(sorted([NUMERIC_INTERVALS[s] for s in size_query])) + params_column = pd.to_numeric(df[AutoEvalColumn.params.name], errors="coerce") + mask = params_column.apply(lambda x: any(numeric_interval.contains(x))) + filtered_df = filtered_df.loc[mask] + + return filtered_df + + +# triggered only once at startup => read query parameter if it exists +def load_query(request: gr.Request): + query = request.query_params.get("query") or "" + return query + + +demo = gr.Blocks(css=custom_css) +with demo: + gr.HTML(TITLE) + gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text") + + with gr.Tabs(elem_classes="tab-buttons") as tabs: + with gr.TabItem("Hallucinations Benchmark", + elem_id="llm-benchmark-tab-table", + id=0): + with gr.Row(): + with gr.Column(): + with gr.Row(): + search_bar = gr.Textbox(placeholder=" 🔍 Model search (separate multiple queries with `;`)", + show_label=False, + elem_id="search-bar") + with gr.Row(): + shown_columns = gr.CheckboxGroup( + choices=[ + c.name + for c in fields(AutoEvalColumn) + if not c.hidden and not c.never_hidden and not c.dummy + ], + value=[ + c.name + for c in fields(AutoEvalColumn) + if c.displayed_by_default and not c.hidden and not c.never_hidden + ], + label="Select columns to show", + elem_id="column-select", + interactive=True) + + with gr.Column(min_width=320): + 
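+                    # Three filters: model type, precision, and parameter-count
+                    # bucket (NUMERIC_INTERVALS); each is wired back to
+                    # update_table() by the .change() handlers registered below.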
filter_columns_type = gr.CheckboxGroup( + label="Model types", + choices=[t.to_str() for t in ModelType], + value=[t.to_str() for t in ModelType], + interactive=True, + elem_id="filter-columns-type") + + filter_columns_precision = gr.CheckboxGroup( + label="Precision", + choices=[i.value.name for i in Precision], + value=[i.value.name for i in Precision], + interactive=True, + elem_id="filter-columns-precision") + + filter_columns_size = gr.CheckboxGroup( + label="Model sizes (in billions of parameters)", + choices=list(NUMERIC_INTERVALS.keys()), + value=list(NUMERIC_INTERVALS.keys()), + interactive=True, + elem_id="filter-columns-size") + + # breakpoint() + + leaderboard_table = gr.components.Dataframe( + value=leaderboard_df[ + [c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value + [AutoEvalColumn.dummy.name] + ] if leaderboard_df.empty is False else leaderboard_df, + headers=[c.name for c in fields(AutoEvalColumn) if c.never_hidden] + shown_columns.value, + datatype=TYPES, + elem_id="leaderboard-table", + interactive=False, + visible=True) # column_widths=["2%", "20%"] + + # Dummy leaderboard for handling the case when the user uses backspace key + hidden_leaderboard_table_for_search = gr.components.Dataframe( + value=original_df[COLS] if original_df.empty is False else original_df, + headers=COLS, + datatype=TYPES, + visible=False) + + search_bar.submit( + update_table, + [ + hidden_leaderboard_table_for_search, + shown_columns, + filter_columns_type, + filter_columns_precision, + filter_columns_size, + search_bar, + ], + leaderboard_table) + + # Check query parameter once at startup and update search bar + demo.load(load_query, inputs=[], outputs=[search_bar]) + + for selector in [shown_columns, filter_columns_type, filter_columns_precision, filter_columns_size]: + selector.change( + update_table, + [ + hidden_leaderboard_table_for_search, + shown_columns, + filter_columns_type, + filter_columns_precision, + filter_columns_size, + search_bar, + ], + leaderboard_table, + queue=True) + + with gr.TabItem("About", elem_id="llm-benchmark-tab-table", id=2): + gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text") + + dataset_table = gr.components.Dataframe( + value=dataset_df, + headers=list(dataset_df.columns), + datatype=['str', 'markdown', 'str', 'str', 'str'], + elem_id="dataset-table", + interactive=False, + visible=True, + column_widths=["15%", "20%"]) + + gr.Markdown(LLM_BENCHMARKS_DETAILS, elem_classes="markdown-text") + gr.Markdown(FAQ_TEXT, elem_classes="markdown-text") + + with gr.TabItem("Submit a model ", elem_id="llm-benchmark-tab-table", id=3): + with gr.Column(): + with gr.Row(): + gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text") + + with gr.Column(): + with gr.Accordion(f"✅ Finished Evaluations ({len(finished_eval_queue_df)})", open=False): + with gr.Row(): + finished_eval_table = gr.components.Dataframe( + value=finished_eval_queue_df, + headers=EVAL_COLS, + datatype=EVAL_TYPES, + row_count=5) + + with gr.Accordion(f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})", open=False): + with gr.Row(): + running_eval_table = gr.components.Dataframe( + value=running_eval_queue_df, + headers=EVAL_COLS, + datatype=EVAL_TYPES, + row_count=5) + + with gr.Accordion(f"⏳ Scheduled Evaluation Queue ({len(pending_eval_queue_df)})", open=False): + with gr.Row(): + pending_eval_table = gr.components.Dataframe( + value=pending_eval_queue_df, + headers=EVAL_COLS, + datatype=EVAL_TYPES, + row_count=5) + + with gr.Row(): + 
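+            # Submission form: collects the model id, revision, precision and
+            # weight/model type, then passes everything to add_new_eval() when
+            # the "Submit Eval" button below is clicked.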
gr.Markdown("# Submit your model here", elem_classes="markdown-text") + + with gr.Row(): + with gr.Column(): + model_name_textbox = gr.Textbox(label="Model name") + revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main") + private = gr.Checkbox(False, label="Private", visible=not IS_PUBLIC) + model_type = gr.Dropdown( + choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown], + label="Model type", + multiselect=False, + value=None, + interactive=True) + + with gr.Column(): + precision = gr.Dropdown( + choices=[i.value.name for i in Precision if i != Precision.Unknown], + label="Precision", + multiselect=False, + value="float32", + interactive=True) + + weight_type = gr.Dropdown( + choices=[i.value.name for i in WeightType], + label="Weights type", + multiselect=False, + value="Original", + interactive=True) + + base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)") + + submit_button = gr.Button("Submit Eval") + submission_result = gr.Markdown() + submit_button.click( + add_new_eval, + [ + model_name_textbox, + base_model_name_textbox, + revision_name_textbox, + precision, + private, + weight_type, + model_type, + ], + submission_result) + + with gr.Row(): + with gr.Accordion("Citing this leaderboard", open=False): + citation_button = gr.Textbox( + value=CITATION_BUTTON_TEXT, + label=CITATION_BUTTON_LABEL, + lines=20, + elem_id="citation-button", + show_copy_button=True) + +scheduler = BackgroundScheduler() + +scheduler.add_job(restart_space, "interval", seconds=6 * 60 * 60) + + +def launch_backend(): + import subprocess + from src.backend.envs import DEVICE + if DEVICE not in {'cpu'}: + _ = subprocess.run(["python", "backend-cli.py"]) + + +# scheduler.add_job(launch_backend, "interval", seconds=120) + +scheduler.start() +demo.queue(default_concurrency_limit=40).launch() diff --git a/backend-cli.py b/backend-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..888aef36d7955b72eef82effc3ca0c4d539e542a --- /dev/null +++ b/backend-cli.py @@ -0,0 +1,309 @@ +#!/usr/bin/env python + +import os +import json + +import socket +import random +from datetime import datetime + +from src.backend.run_eval_suite import run_evaluation +from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request +from src.backend.sort_queue import sort_models_by_priority +from src.backend.envs import Tasks, EVAL_REQUESTS_PATH_BACKEND, EVAL_RESULTS_PATH_BACKEND, DEVICE, LIMIT, Task + +from src.backend.manage_requests import EvalRequest +from src.leaderboard.read_evals import EvalResult + +from src.envs import QUEUE_REPO, RESULTS_REPO, API +from src.utils import my_snapshot_download + +from src.leaderboard.read_evals import get_raw_eval_results + +from typing import Optional + +import time + +import logging +import pprint + + +def my_set_eval_request(api, eval_request, set_to_status, hf_repo, local_dir): + for i in range(10): + try: + set_eval_request(api=api, eval_request=eval_request, set_to_status=set_to_status, hf_repo=hf_repo, local_dir=local_dir) + return + except Exception: + time.sleep(60) + return + + +logging.getLogger("openai").setLevel(logging.WARNING) + +logging.basicConfig(level=logging.ERROR) +pp = pprint.PrettyPrinter(width=80) + +PENDING_STATUS = "PENDING" +RUNNING_STATUS = "RUNNING" +FINISHED_STATUS = "FINISHED" +FAILED_STATUS = "FAILED" + +TASKS_HARNESS = [task.value for task in Tasks] + + +my_snapshot_download(repo_id=RESULTS_REPO, revision="main", 
local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60) +my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + + +def sanity_checks(): + print(f'Device: {DEVICE}') + + # pull the eval dataset from the hub and parse any eval requests + # check completed evals and set them to finished + my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + check_completed_evals(api=API, checked_status=RUNNING_STATUS, completed_status=FINISHED_STATUS, + failed_status=FAILED_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND, + hf_repo_results=RESULTS_REPO, local_dir_results=EVAL_RESULTS_PATH_BACKEND) + return + + +def request_to_result_name(request: EvalRequest) -> str: + # Request: EvalRequest(model='meta-llama/Llama-2-13b-hf', private=False, status='FINISHED', + # json_filepath='./eval-queue-bk/meta-llama/Llama-2-13b-hf_eval_request_False_False_False.json', + # weight_type='Original', model_type='pretrained', precision='float32', base_model='', revision='main', + # submitted_time='2023-09-09T10:52:17Z', likes=389, params=13.016, license='?') + # + # EvalResult(eval_name='meta-llama_Llama-2-13b-hf_float32', full_model='meta-llama/Llama-2-13b-hf', + # org='meta-llama', model='Llama-2-13b-hf', revision='main', + # results={'nq_open': 33.739612188365655, 'triviaqa': 74.12505572893447}, + # precision=, + # model_type=, + # weight_type=, + # architecture='LlamaForCausalLM', license='?', likes=389, num_params=13.016, date='2023-09-09T10:52:17Z', still_on_hub=True) + # + org_and_model = request.model.split("/", 1) + if len(org_and_model) == 1: + model = org_and_model[0] + res = f"{model}_{request.precision}" + else: + org = org_and_model[0] + model = org_and_model[1] + res = f"{org}_{model}_{request.precision}" + return res + + +def process_evaluation(task: Task, eval_request: EvalRequest) -> dict: + batch_size = 2 + try: + results = run_evaluation(eval_request=eval_request, task_names=[task.benchmark], num_fewshot=task.num_fewshot, + batch_size=batch_size, device=DEVICE, use_cache=None, limit=LIMIT) + except RuntimeError as e: + if "No executable batch size found" in str(e): + batch_size = 1 + results = run_evaluation(eval_request=eval_request, task_names=[task.benchmark], num_fewshot=task.num_fewshot, + batch_size=batch_size, device=DEVICE, use_cache=None, limit=LIMIT) + else: + raise + + print('RESULTS', results) + + dumped = json.dumps(results, indent=2, default=lambda o: '') + print(dumped) + + output_path = os.path.join(EVAL_RESULTS_PATH_BACKEND, *eval_request.model.split("/"), f"results_{datetime.now()}.json") + os.makedirs(os.path.dirname(output_path), exist_ok=True) + with open(output_path, "w") as f: + f.write(dumped) + + my_snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + API.upload_file(path_or_fileobj=output_path, path_in_repo=f"{eval_request.model}/results_{datetime.now()}.json", + repo_id=RESULTS_REPO, repo_type="dataset") + return results + + +def process_finished_requests(thr: int, hard_task_lst: Optional[list[str]] = None) -> bool: + sanity_checks() + + current_finished_status = [FINISHED_STATUS, FAILED_STATUS] + + # Get all eval request that are FINISHED, if you want to run other evals, change this parameter + eval_requests: list[EvalRequest] = get_eval_requests(job_status=current_finished_status, hf_repo=QUEUE_REPO, 
local_dir=EVAL_REQUESTS_PATH_BACKEND) + # Sort the evals by priority (first submitted, first run) + eval_requests: list[EvalRequest] = sort_models_by_priority(api=API, models=eval_requests) + + random.shuffle(eval_requests) + + eval_results: list[EvalResult] = get_raw_eval_results(EVAL_RESULTS_PATH_BACKEND, EVAL_REQUESTS_PATH_BACKEND) + + result_name_to_request = {request_to_result_name(r): r for r in eval_requests} + result_name_to_result = {r.eval_name: r for r in eval_results} + + for eval_request in eval_requests: + if eval_request.likes >= thr: + result_name: str = request_to_result_name(eval_request) + + # Check the corresponding result + eval_result: Optional[EvalResult] = result_name_to_result[result_name] if result_name in result_name_to_result else None + + # breakpoint() + + task_lst = TASKS_HARNESS.copy() + random.shuffle(task_lst) + + # Iterate over tasks and, if we do not have results for a task, run the relevant evaluations + for task in task_lst: + task_name = task.benchmark + + do_run_task = False + if hard_task_lst is None or any(ss in task_name for ss in hard_task_lst): + do_run_task = True + + if (eval_result is None or task_name not in eval_result.results) and do_run_task: + eval_request: EvalRequest = result_name_to_request[result_name] + + my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + my_set_eval_request(api=API, eval_request=eval_request, set_to_status=RUNNING_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND) + + results = process_evaluation(task, eval_request) + + my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + my_set_eval_request(api=API, eval_request=eval_request, set_to_status=FINISHED_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND) + + return True + + return False + + +def maybe_refresh_results(thr: int, hard_task_lst: Optional[list[str]] = None) -> bool: + sanity_checks() + + current_finished_status = [PENDING_STATUS, FINISHED_STATUS, FAILED_STATUS] + + # Get all eval request that are FINISHED, if you want to run other evals, change this parameter + eval_requests: list[EvalRequest] = get_eval_requests(job_status=current_finished_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND) + # Sort the evals by priority (first submitted, first run) + eval_requests: list[EvalRequest] = sort_models_by_priority(api=API, models=eval_requests) + + random.shuffle(eval_requests) + + eval_results: list[EvalResult] = get_raw_eval_results(EVAL_RESULTS_PATH_BACKEND, EVAL_REQUESTS_PATH_BACKEND) + + result_name_to_request = {request_to_result_name(r): r for r in eval_requests} + result_name_to_result = {r.eval_name: r for r in eval_results} + + for eval_request in eval_requests: + if eval_request.likes >= thr: + result_name: str = request_to_result_name(eval_request) + + # Check the corresponding result + eval_result: Optional[EvalResult] = result_name_to_result[result_name] if result_name in result_name_to_result else None + + task_lst = TASKS_HARNESS.copy() + random.shuffle(task_lst) + + # Iterate over tasks and, if we do not have results for a task, run the relevant evaluations + for task in task_lst: + task_name = task.benchmark + + do_run_task = False + if hard_task_lst is None or any(ss in task_name for ss in hard_task_lst): + do_run_task = True + + task_lst = ['nq', 'trivia', 'tqa', 'self'] + if (eval_result is None or do_run_task or task_name not in 
eval_result.results or + any(ss in task_name for ss in task_lst)): + eval_request: EvalRequest = result_name_to_request[result_name] + + my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + my_set_eval_request(api=API, eval_request=eval_request, set_to_status=RUNNING_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND) + + results = process_evaluation(task, eval_request) + + my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + my_set_eval_request(api=API, eval_request=eval_request, set_to_status=FINISHED_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND) + + return True + + return False + + +def process_pending_requests() -> bool: + sanity_checks() + + current_pending_status = [PENDING_STATUS] + + # Get all eval request that are PENDING, if you want to run other evals, change this parameter + eval_requests = get_eval_requests(job_status=current_pending_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND) + # Sort the evals by priority (first submitted, first run) + eval_requests = sort_models_by_priority(api=API, models=eval_requests) + + random.shuffle(eval_requests) + + print(f"Found {len(eval_requests)} {','.join(current_pending_status)} eval requests") + + if len(eval_requests) == 0: + return False + + eval_request = eval_requests[0] + pp.pprint(eval_request) + + my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + my_set_eval_request(api=API, eval_request=eval_request, set_to_status=RUNNING_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND) + + task_lst = TASKS_HARNESS.copy() + random.shuffle(task_lst) + + for task in task_lst: + results = process_evaluation(task, eval_request) + + my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + my_set_eval_request(api=API, eval_request=eval_request, set_to_status=FINISHED_STATUS, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND) + + return True + + +if __name__ == "__main__": + local_debug = True + #debug specific task by ping + if local_debug: + debug_model_names = ['mistralai/Mixtral-8x7B-Instruct-v0.1'] + debug_model_names = ["TheBloke/Mixtral-8x7B-v0.1-GPTQ"] + # debug_task_name = 'ifeval' + debug_task_name = 'selfcheckgpt' + task_lst = TASKS_HARNESS.copy() + for task in task_lst: + for debug_model_name in debug_model_names: + task_name = task.benchmark + if task_name != debug_task_name: + continue + eval_request = EvalRequest(model=debug_model_name, private=False, status='', json_filepath='', precision='float16') + results = process_evaluation(task, eval_request) + + wait = True + hard_task_lst = None + if socket.gethostname() in {'hamburg', 'neuromancer'} or os.path.isdir("/home/pminervi"): + wait = False + hard_task_lst = ['nq', 'trivia', 'tqa'] + + if wait: + time.sleep(60 * random.randint(5, 10)) + + res = False + + if random.randint(0, 10) == 0: + res = process_pending_requests() + time.sleep(60) + + if res is False: + if random.randint(0, 5) == 0: + res = maybe_refresh_results(100, hard_task_lst=hard_task_lst) + else: + res = process_finished_requests(100, hard_task_lst=hard_task_lst) + + time.sleep(60) + + if res is False: + if random.randint(0, 5) == 0: + res = maybe_refresh_results(0, hard_task_lst=hard_task_lst) + else: + res = 
process_finished_requests(0, hard_task_lst=hard_task_lst)
diff --git a/blog/Hallucination-Leaderboard-Summary.csv b/blog/Hallucination-Leaderboard-Summary.csv
new file mode 100644
index 0000000000000000000000000000000000000000..550b9eeaed47df906a5dd72c4b963ecee4440a0d
--- /dev/null
+++ b/blog/Hallucination-Leaderboard-Summary.csv
@@ -0,0 +1,20 @@
+Category,Benchmark,Dataset Link,Data Split,Data Size,Language
+Closed-book Open-domain QA ,NQ Open (64-shot),https://huggingface.co/datasets/nq_open/viewer/nq_open/validation,validation,3.61k,En
+Closed-book Open-domain QA ,NQ Open (8-shot),https://huggingface.co/datasets/nq_open/viewer/nq_open/validation,validation,3.61k,En
+Closed-book Open-domain QA ,TriviaQA (64-shot),https://huggingface.co/datasets/trivia_qa/viewer/rc.nocontext/test,test,17.2k,En
+Closed-book Open-domain QA ,TriviaQA (8-shot),https://huggingface.co/datasets/trivia_qa/viewer/rc.nocontext/test,test,17.2k,En
+Closed-book Open-domain QA ,TruthfulQA MC1 (0-shot),https://huggingface.co/datasets/truthful_qa/viewer/multiple_choice,mc1_targets column,0.8k,En
+Closed-book Open-domain QA ,TruthfulQA MC2 (0-shot),https://huggingface.co/datasets/truthful_qa/viewer/multiple_choice,mc2_targets column,0.8k,En
+Fact-Checking,FEVER (16-shot),https://huggingface.co/datasets/fever/viewer/v1.0/labelled_dev,labelled_dev,37.6k,En
+Hallucination Detection,FaithDial (8-shot),https://huggingface.co/datasets/McGill-NLP/FaithDial,test,3.54k,En
+Hallucination Detection,HaluEval QA (0-shot),https://huggingface.co/datasets/pminervini/HaluEval/viewer/qa_samples,qa_samples,10k,En
+Hallucination Detection,HaluEval Summ (0-shot),https://huggingface.co/datasets/pminervini/HaluEval/viewer/summarization_samples,summarization_samples,10k,En
+Hallucination Detection,HaluEval Dial (0-shot),https://huggingface.co/datasets/pminervini/HaluEval/viewer/dialogue_samples,dialogue_samples,10k,En
+Hallucination Detection,TrueFalse (8-shot),https://huggingface.co/datasets/pminervini/true-false/viewer/default/cieacf,cieacf,6.09k,En
+Instruction Following,MemoTrap (0-shot),https://huggingface.co/datasets/pminervini/inverse-scaling/viewer/memo-trap,memo-trap,0.9k,En
+Instruction Following,IFEval (0-shot),https://huggingface.co/datasets/wis-k/instruction-following-eval,train,0.5k,En
+Reading Comprehension,SQuADv2 (4-shot),https://huggingface.co/datasets/squad_v2/viewer/squad_v2/validation,validation,11.9k,En
+Reading Comprehension,RACE (0-shot),https://huggingface.co/datasets/EleutherAI/race,test,1.05k,En
+Self-Consistency,SelfCheckGPT (0-shot),https://huggingface.co/datasets/potsawee/wiki_bio_gpt3_hallucination,validation,0.2k,En
+Summarisation,XSum (2-shot),https://huggingface.co/datasets/EdinburghNLP/xsum/viewer/default/test,test,11.3k,En
+Summarisation,CNN/DM (2-shot),https://huggingface.co/datasets/cnn_dailymail/viewer/3.0.0/test,test,11.5k,En
\ No newline at end of file
diff --git a/cli/analysis-cli.py b/cli/analysis-cli.py
new file mode 100755
index 0000000000000000000000000000000000000000..d80389ea19efce48c18a0f9c621c36331d768eb6
--- /dev/null
+++ b/cli/analysis-cli.py
@@ -0,0 +1,336 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import json
+import pickle
+
+import numpy as np
+
+import pandas as pd
+import seaborn as sns
+import matplotlib.pyplot as plt
+
+from scipy.cluster.hierarchy import linkage
+
+from src.backend.envs import Tasks, EVAL_REQUESTS_PATH_BACKEND, EVAL_RESULTS_PATH_BACKEND, DEVICE, LIMIT, Task
+
+from src.envs import QUEUE_REPO, RESULTS_REPO, API
+from src.utils import my_snapshot_download
+
+
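This script flattens per-model result JSONs into a model × (dataset, metric) matrix and renders it as seaborn clustermaps. A toy, self-contained version of the clustermap call it builds toward (synthetic scores and made-up model names; the real inputs are the parsed results below):

```python
import numpy as np
import pandas as pd
import seaborn as sns

rng = np.random.default_rng(0)
toy = pd.DataFrame(rng.random((4, 3)),
                   index=["NQ (8-shot), EM", "TriviaQA (8-shot), EM",
                          "XSum, ROUGE-L", "RACE, Accuracy"],
                   columns=["model-a", "model-b", "model-c"])

# Ward linkage over Euclidean distances, as in the full script below.
grid = sns.clustermap(toy, method="ward", metric="euclidean", annot=True, fmt=".2f")
grid.savefig("toy_clustermap.png")
```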
+def is_float(string): + try: + float(string) + return True + except ValueError: + return False + + +def find_json_files(json_path): + res = [] + for root, dirs, files in os.walk(json_path): + for file in files: + if file.endswith(".json"): + res.append(os.path.join(root, file)) + return res + + +def sanitise_metric(name: str) -> str: + res = name + res = res.replace("prompt_level_strict_acc", "Prompt-Level Accuracy") + res = res.replace("acc", "Accuracy") + res = res.replace("exact_match", "EM") + res = res.replace("avg-selfcheckgpt", "AVG") + res = res.replace("max-selfcheckgpt", "MAX") + res = res.replace("rouge", "ROUGE-") + res = res.replace("bertscore_precision", "BERT-P") + res = res.replace("exact", "EM") + res = res.replace("HasAns_EM", "HasAns") + res = res.replace("NoAns_EM", "NoAns") + res = res.replace("em", "EM") + return res + + +def sanitise_dataset(name: str) -> str: + res = name + res = res.replace("tqa8", "TriviaQA (8-shot)") + res = res.replace("nq8", "NQ (8-shot)") + res = res.replace("nq_open", "NQ (64-shot)") + res = res.replace("triviaqa", "TriviaQA (64-shot)") + res = res.replace("truthfulqa", "TruthfulQA") + res = res.replace("ifeval", "IFEval") + res = res.replace("selfcheckgpt", "SelfCheckGPT") + res = res.replace("truefalse_cieacf", "True-False") + res = res.replace("mc", "MC") + res = res.replace("race", "RACE") + res = res.replace("squad", "SQuAD") + res = res.replace("memo-trap", "MemoTrap") + res = res.replace("cnndm", "CNN/DM") + res = res.replace("xsum", "XSum") + res = res.replace("qa", "QA") + res = res.replace("summarization", "Summarization") + res = res.replace("dialogue", "Dialog") + res = res.replace("halueval", "HaluEval") + res = res.replace("_v2", "") + res = res.replace("_", " ") + return res + + +cache_file = 'data_map_cache.pkl' + + +def load_data_map_from_cache(cache_file): + if os.path.exists(cache_file): + with open(cache_file, 'rb') as f: + return pickle.load(f) + else: + return None + + +def save_data_map_to_cache(data_map, cache_file): + with open(cache_file, 'wb') as f: + pickle.dump(data_map, f) + + +# Try to load the data_map from the cache file +data_map = load_data_map_from_cache(cache_file) + + +if data_map is None: + my_snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + + result_path_lst = find_json_files(EVAL_RESULTS_PATH_BACKEND) + request_path_lst = find_json_files(EVAL_REQUESTS_PATH_BACKEND) + + model_name_to_model_map = {} + + for path in request_path_lst: + with open(path, 'r') as f: + data = json.load(f) + model_name_to_model_map[data["model"]] = data + + model_dataset_metric_to_result_map = {} + + # data_map[model_name][(dataset_name, sanitised_metric_name)] = value + data_map = {} + + for path in result_path_lst: + with open(path, 'r') as f: + data = json.load(f) + model_name = data["config"]["model_name"] + for dataset_name, results_dict in data["results"].items(): + for metric_name, value in results_dict.items(): + + if model_name_to_model_map[model_name]["likes"] > 128: + + to_add = True + + if 'f1' in metric_name: + to_add = False + + if 'stderr' in metric_name: + to_add = False + + if 'memo-trap_v2' in dataset_name: + to_add = False + + if 'faithdial' in dataset_name: + to_add = False + + if 'truthfulqa_gen' in dataset_name: + to_add = False + + if 'bertscore' in metric_name: + if 'precision' not in 
metric_name: + to_add = False + + if 'halueval' in dataset_name: + if 'acc' not in metric_name: + to_add = False + + if 'ifeval' in dataset_name: + if 'prompt_level_strict_acc' not in metric_name: + to_add = False + + if 'squad' in dataset_name: + # to_add = False + if 'best_exact' in metric_name: + to_add = False + + if 'fever' in dataset_name: + to_add = False + + if ('xsum' in dataset_name or 'cnn' in dataset_name) and 'v2' not in dataset_name: + to_add = False + + if isinstance(value, str): + if is_float(value): + value = float(value) + else: + to_add = False + + if to_add: + if 'rouge' in metric_name: + value /= 100.0 + + if 'squad' in dataset_name: + value /= 100.0 + + sanitised_metric_name = metric_name + if "," in sanitised_metric_name: + sanitised_metric_name = sanitised_metric_name.split(',')[0] + sanitised_metric_name = sanitise_metric(sanitised_metric_name) + sanitised_dataset_name = sanitise_dataset(dataset_name) + + model_dataset_metric_to_result_map[(model_name, sanitised_dataset_name, sanitised_metric_name)] = value + + if model_name not in data_map: + data_map[model_name] = {} + data_map[model_name][(sanitised_dataset_name, sanitised_metric_name)] = value + + print('model_name', model_name, 'dataset_name', sanitised_dataset_name, 'metric_name', sanitised_metric_name, 'value', value) + + save_data_map_to_cache(data_map, cache_file) + +model_name_lst = [m for m in data_map.keys()] + +nb_max_metrics = max(len(data_map[model_name]) for model_name in model_name_lst) + +for model_name in model_name_lst: + if len(data_map[model_name]) < nb_max_metrics - 5: + del data_map[model_name] + +plot_type_lst = ['all', 'summ', 'qa', 'instr', 'detect', 'rc'] + +for plot_type in plot_type_lst: + + data_map_v2 = {} + for model_name in data_map.keys(): + for dataset_metric in data_map[model_name].keys(): + if dataset_metric not in data_map_v2: + data_map_v2[dataset_metric] = {} + + if plot_type in {'all'}: + to_add = True + if 'ROUGE' in dataset_metric[1] and 'ROUGE-L' not in dataset_metric[1]: + to_add = False + if 'SQuAD' in dataset_metric[0] and 'EM' not in dataset_metric[1]: + to_add = False + if 'SelfCheckGPT' in dataset_metric[0] and 'MAX' not in dataset_metric[1]: + to_add = False + if '64-shot' in dataset_metric[0]: + to_add = False + if to_add is True: + data_map_v2[dataset_metric][model_name] = data_map[model_name][dataset_metric] + elif plot_type in {'summ'}: + if 'CNN' in dataset_metric[0] or 'XSum' in dataset_metric[0]: + data_map_v2[dataset_metric][model_name] = data_map[model_name][dataset_metric] + elif plot_type in {'qa'}: + if 'TriviaQA' in dataset_metric[0] or 'NQ' in dataset_metric[0] or 'TruthfulQA' in dataset_metric[0]: + data_map_v2[dataset_metric][model_name] = data_map[model_name][dataset_metric] + elif plot_type in {'instr'}: + if 'MemoTrap' in dataset_metric[0] or 'IFEval' in dataset_metric[0]: + data_map_v2[dataset_metric][model_name] = data_map[model_name][dataset_metric] + elif plot_type in {'detect'}: + if 'HaluEval' in dataset_metric[0] or 'SelfCheck' in dataset_metric[0]: + data_map_v2[dataset_metric][model_name] = data_map[model_name][dataset_metric] + elif plot_type in {'rc'}: + if 'RACE' in dataset_metric[0] or 'SQuAD' in dataset_metric[0]: + data_map_v2[dataset_metric][model_name] = data_map[model_name][dataset_metric] + else: + assert False, f"Unknown plot type: {plot_type}" + + # df = pd.DataFrame.from_dict(data_map, orient='index') # Invert the y-axis (rows) + df = pd.DataFrame.from_dict(data_map_v2, orient='index') # Invert the y-axis (rows) + df.index 
= [', '.join(map(str, idx)) for idx in df.index] + + o_df = df.copy(deep=True) + + # breakpoint() + + print(df) + + # Check for NaN or infinite values and replace them + df.replace([np.inf, -np.inf], np.nan, inplace=True) # Replace infinities with NaN + df.fillna(0, inplace=True) # Replace NaN with 0 (or use another imputation strategy) + + from sklearn.preprocessing import MinMaxScaler + + # scaler = MinMaxScaler() + # df = pd.DataFrame(scaler.fit_transform(df), index=df.index, columns=df.columns) + + # Calculate dimensions based on the DataFrame size + cell_height = 1.0 # Height of each cell in inches + cell_width = 1.0 # Width of each cell in inches + + n_rows = len(df.index) # Datasets and Metrics + n_cols = len(df.columns) # Models + + # Calculate figure size dynamically + fig_width = cell_width * n_cols + 0 + fig_height = cell_height * n_rows + 0 + + col_cluster = True + row_cluster = True + + sns.set_context("notebook", font_scale=1.3) + + dendrogram_ratio = (.1, .1) + + if plot_type in {'detect'}: + fig_width = cell_width * n_cols - 2 + fig_height = cell_height * n_rows + 5.2 + dendrogram_ratio = (.1, .2) + + if plot_type in {'instr'}: + fig_width = cell_width * n_cols - 2 + fig_height = cell_height * n_rows + 5.2 + dendrogram_ratio = (.1, .4) + + if plot_type in {'qa'}: + fig_width = cell_width * n_cols - 2 + fig_height = cell_height * n_rows + 4 + dendrogram_ratio = (.1, .2) + + if plot_type in {'summ'}: + fig_width = cell_width * n_cols - 2 + fig_height = cell_height * n_rows + 2.0 + dendrogram_ratio = (.1, .1) + row_cluster = False + + if plot_type in {'rc'}: + fig_width = cell_width * n_cols - 2 + fig_height = cell_height * n_rows + 5.2 + dendrogram_ratio = (.1, .4) + + print('figsize', (fig_width, fig_height)) + + o_df.to_json(f'plots/clustermap_{plot_type}.json', orient='split') + + print(f'Generating the clustermaps for {plot_type}') + + for cmap in [None, 'coolwarm', 'viridis']: + fig = sns.clustermap(df, + method='ward', + metric='euclidean', + cmap=cmap, + figsize=(fig_width, fig_height), # figsize=(24, 16), + annot=True, + mask=o_df.isnull(), + dendrogram_ratio=dendrogram_ratio, + fmt='.2f', + col_cluster=col_cluster, + row_cluster=row_cluster) + + # Adjust the size of the cells (less wide) + plt.setp(fig.ax_heatmap.get_yticklabels(), rotation=0) + plt.setp(fig.ax_heatmap.get_xticklabels(), rotation=90) + + cmap_suffix = '' if cmap is None else f'_{cmap}' + + # Save the clustermap to file + fig.savefig(f'blog/figures/clustermap_{plot_type}{cmap_suffix}.pdf') + fig.savefig(f'blog/figures/clustermap_{plot_type}{cmap_suffix}.png') + fig.savefig(f'blog/figures/clustermap_{plot_type}{cmap_suffix}_t.png', transparent=True, facecolor="none") diff --git a/cli/averitec-upload-cli.py b/cli/averitec-upload-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..5800a1e3bfe4661d5b091d949d46444b9fcc4076 --- /dev/null +++ b/cli/averitec-upload-cli.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 + +from datasets import load_dataset + +path = 'pminervini/averitec' + +ds = load_dataset("json", + data_files={ + 'train': '/Users/pasquale/workspace/AVeriTeC/data/train.json', + 'dev': '/Users/pasquale/workspace/AVeriTeC/data/dev.json' + }) +ds.push_to_hub(path) diff --git a/cli/beta-cli.py b/cli/beta-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..bdfb884aa082e93fe4a4b6b75e85a585eaa3641a --- /dev/null +++ b/cli/beta-cli.py @@ -0,0 +1,71 @@ +#!/usr/bin/env python + +from huggingface_hub import snapshot_download +from src.leaderboard.read_evals 
import get_raw_eval_results +from src.envs import API, EVAL_REQUESTS_PATH, EVAL_RESULTS_PATH, QUEUE_REPO, RESULTS_REPO + +from src.backend.run_eval_suite import run_evaluation +from src.backend.manage_requests import check_completed_evals, get_eval_requests, set_eval_request +from src.backend.sort_queue import sort_models_by_priority +from src.backend.envs import Tasks, EVAL_REQUESTS_PATH_BACKEND, EVAL_RESULTS_PATH_BACKEND, DEVICE, LIMIT, Task + +from src.leaderboard.read_evals import get_raw_eval_results + +from src.backend.manage_requests import EvalRequest +from src.leaderboard.read_evals import EvalResult + +snapshot_download(repo_id=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30) +snapshot_download(repo_id=RESULTS_REPO, local_dir=EVAL_RESULTS_PATH, repo_type="dataset", tqdm_class=None, etag_timeout=30) + +PENDING_STATUS = "PENDING" +RUNNING_STATUS = "RUNNING" +FINISHED_STATUS = "FINISHED" +FAILED_STATUS = "FAILED" + +TASKS_HARNESS = [task.value for task in Tasks] + +current_finished_status = [FINISHED_STATUS] + + +def request_to_result_name(request: EvalRequest) -> str: + org_and_model = request.model.split("/", 1) + if len(org_and_model) == 1: + model = org_and_model[0] + res = f"{model}_{request.precision}" + else: + org = org_and_model[0] + model = org_and_model[1] + res = f"{org}_{model}_{request.precision}" + return res + + +# Get all eval request that are FINISHED, if you want to run other evals, change this parameter +eval_requests: list[EvalRequest] = get_eval_requests(job_status=current_finished_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND) +# Sort the evals by priority (first submitted first run) +eval_requests: list[EvalRequest] = sort_models_by_priority(api=API, models=eval_requests) + +eval_results: list[EvalResult] = get_raw_eval_results(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH) + +result_name_to_request = {request_to_result_name(r): r for r in eval_requests} +result_name_to_result = {r.eval_name: r for r in eval_results} + +print('Requests', sorted(result_name_to_request.keys())) +print('Results', sorted(result_name_to_result.keys())) + +for eval_request in eval_requests: + result_name: str = request_to_result_name(eval_request) + + # Check the corresponding result + eval_result: EvalResult = result_name_to_result[result_name] + + # Iterate over tasks and, if we do not have results for a task, run the relevant evaluations + for task in TASKS_HARNESS: + task_name = task.benchmark + + if task_name not in eval_result.results: + print('RUN THIS ONE!', result_name, task_name) + +raw_data = get_raw_eval_results(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH) +all_data_json = [v.to_dict() for v in raw_data if v.is_complete()] + +breakpoint() diff --git a/cli/completed-cli.py b/cli/completed-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..965c073c6295a39984271803db26394c934a49f0 --- /dev/null +++ b/cli/completed-cli.py @@ -0,0 +1,124 @@ +#!/usr/bin/env python + +from huggingface_hub import snapshot_download + +from src.backend.manage_requests import get_eval_requests +from src.backend.sort_queue import sort_models_by_priority +from src.backend.envs import Tasks, EVAL_REQUESTS_PATH_BACKEND, EVAL_RESULTS_PATH_BACKEND + +from src.backend.manage_requests import EvalRequest +from src.leaderboard.read_evals import EvalResult + +from src.envs import QUEUE_REPO, RESULTS_REPO, API + +import logging +import pprint + +logging.getLogger("openai").setLevel(logging.WARNING) + 
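Both beta-cli.py above and completed-cli.py below join queue entries to result files through the naming convention in `request_to_result_name`. A tiny self-contained illustration (the helper name here is ours, not the repo's):

```python
# "org/model" plus precision flattens to the eval_name key used by EvalResult.
def to_result_name(model: str, precision: str) -> str:
    org_and_model = model.split("/", 1)
    return "_".join(org_and_model + [precision])

assert to_result_name("meta-llama/Llama-2-13b-hf", "float32") == "meta-llama_Llama-2-13b-hf_float32"
assert to_result_name("gpt2", "float16") == "gpt2_float16"
```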
+logging.basicConfig(level=logging.ERROR) +pp = pprint.PrettyPrinter(width=80) + +PENDING_STATUS = "PENDING" +RUNNING_STATUS = "RUNNING" +FINISHED_STATUS = "FINISHED" +FAILED_STATUS = "FAILED" + +TASKS_HARNESS = [task.value for task in Tasks] + +snapshot_download(repo_id=RESULTS_REPO, revision="main", local_dir=EVAL_RESULTS_PATH_BACKEND, repo_type="dataset", max_workers=60) +snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + + +def request_to_result_name(request: EvalRequest) -> str: + org_and_model = request.model.split("/", 1) + if len(org_and_model) == 1: + model = org_and_model[0] + res = f"{model}_{request.precision}" + else: + org = org_and_model[0] + model = org_and_model[1] + res = f"{org}_{model}_{request.precision}" + return res + + +def process_finished_requests() -> bool: + current_finished_status = [FINISHED_STATUS] + + if False: + import os + import dateutil + model_result_filepaths = [] + results_path = f'{EVAL_RESULTS_PATH_BACKEND}/EleutherAI/gpt-neo-1.3B' + requests_path = f'{EVAL_REQUESTS_PATH_BACKEND}/EleutherAI/gpt-neo-1.3B_eval_request_False_False_False.json' + + for root, _, files in os.walk(results_path): + # We should only have json files in model results + if len(files) == 0 or any([not f.endswith(".json") for f in files]): + continue + + # Sort the files by date + try: + files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7]) + except dateutil.parser._parser.ParserError: + files = [files[-1]] + + for file in files: + model_result_filepaths.append(os.path.join(root, file)) + + eval_results = {} + for model_result_filepath in model_result_filepaths: + # Creation of result + eval_result = EvalResult.init_from_json_file(model_result_filepath) + eval_result.update_with_request_file(requests_path) + + print('XXX', eval_result) + + # Store results of same eval together + eval_name = eval_result.eval_name + if eval_name in eval_results.keys(): + eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None}) + else: + eval_results[eval_name] = eval_result + + print(eval_results) + + return True + + # Get all eval request that are FINISHED, if you want to run other evals, change this parameter + eval_requests: list[EvalRequest] = get_eval_requests(job_status=current_finished_status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND) + # Sort the evals by priority (first submitted first run) + eval_requests: list[EvalRequest] = sort_models_by_priority(api=API, models=eval_requests) + + # XXX + # eval_requests = [r for r in eval_requests if 'neo-1.3B' in r.model] + + import random + random.shuffle(eval_requests) + + from src.leaderboard.read_evals import get_raw_eval_results + eval_results: list[EvalResult] = get_raw_eval_results(EVAL_RESULTS_PATH_BACKEND, EVAL_REQUESTS_PATH_BACKEND) + + result_name_to_request = {request_to_result_name(r): r for r in eval_requests} + result_name_to_result = {r.eval_name: r for r in eval_results} + + for eval_request in eval_requests: + result_name: str = request_to_result_name(eval_request) + + # Check the corresponding result + from typing import Optional + eval_result: Optional[EvalResult] = result_name_to_result[result_name] if result_name in result_name_to_result else None + + # Iterate over tasks and, if we do not have results for a task, run the relevant evaluations + for task in TASKS_HARNESS: + task_name = task.benchmark + + if eval_result is None or task_name not in 
eval_result.results:
+                eval_request: EvalRequest = result_name_to_request[result_name]
+
+                # print(eval_result)
+                print(result_name, 'is incomplete -- missing task:', task_name, eval_result, eval_request.likes)
+
+
+if __name__ == "__main__":
+    res = process_finished_requests()
diff --git a/cli/create_request_file.py b/cli/create_request_file.py
new file mode 100644
index 0000000000000000000000000000000000000000..91846efebc36f8e06c6714aaa5700824f4c80eed
--- /dev/null
+++ b/cli/create_request_file.py
@@ -0,0 +1,107 @@
+import json
+import os
+import pprint
+import re
+from datetime import datetime, timezone
+
+import click
+from colorama import Fore
+from huggingface_hub import HfApi, snapshot_download
+
+EVAL_REQUESTS_PATH = "eval-queue"
+QUEUE_REPO = "hallucinations-leaderboard/requests"
+
+precisions = ("float16", "float32", "bfloat16", "8bit (LLM.int8)", "4bit (QLoRA / FP4)", "GPTQ")
+model_types = ("pretrained", "fine-tuned", "RL-tuned", "instruction-tuned")
+weight_types = ("Original", "Delta", "Adapter")
+
+
+def get_model_size(model_info, precision: str):
+    size_pattern = re.compile(r"(\d\.)?\d+(b|m)")
+    try:
+        model_size = round(model_info.safetensors["total"] / 1e9, 3)
+    except (AttributeError, TypeError):
+        try:
+            size_match = re.search(size_pattern, model_info.modelId.lower())
+            model_size = size_match.group(0)
+            model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
+        except AttributeError:
+            return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
+
+    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
+    model_size = size_factor * model_size
+    return model_size
+
+
+def main():
+    api = HfApi()
+    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+    snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH, repo_type="dataset")
+
+    model_name = click.prompt("Enter model name")
+    revision = click.prompt("Enter revision", default="main")
+    precision = click.prompt("Enter precision", default="float32", type=click.Choice(precisions))
+    model_type = click.prompt("Enter model type", type=click.Choice(model_types))
+    weight_type = click.prompt("Enter weight type", default="Original", type=click.Choice(weight_types))
+    base_model = click.prompt("Enter base model", default="")
+    status = click.prompt("Enter status", default="FINISHED")
+
+    try:
+        model_info = api.model_info(repo_id=model_name, revision=revision)
+    except Exception as e:
+        print(f"{Fore.RED}Could not find model info for {model_name} on the Hub\n{e}{Fore.RESET}")
+        return 1
+
+    model_size = get_model_size(model_info=model_info, precision=precision)
+
+    try:
+        license = model_info.cardData["license"]
+    except Exception:
+        license = "?"
+
+    eval_entry = {
+        "model": model_name,
+        "base_model": base_model,
+        "revision": revision,
+        "private": False,
+        "precision": precision,
+        "weight_type": weight_type,
+        "status": status,
+        "submitted_time": current_time,
+        "model_type": model_type,
+        "likes": model_info.likes,
+        "params": model_size,
+        "license": license,
+    }
+
+    user_name = ""
+    model_path = model_name
+    if "/" in model_name:
+        user_name = model_name.split("/")[0]
+        model_path = model_name.split("/")[1]
+
+    pprint.pprint(eval_entry)
+
+    if click.confirm("Do you want to continue?
This request file will be pushed to the hub"): + click.echo("continuing...") + + out_dir = f"{EVAL_REQUESTS_PATH}/{user_name}" + os.makedirs(out_dir, exist_ok=True) + out_path = f"{out_dir}/{model_path}_eval_request_{False}_{precision}_{weight_type}.json" + + with open(out_path, "w") as f: + f.write(json.dumps(eval_entry)) + + api.upload_file( + path_or_fileobj=out_path, + path_in_repo=out_path.split(f"{EVAL_REQUESTS_PATH}/")[1], + repo_id=QUEUE_REPO, + repo_type="dataset", + commit_message=f"Add {model_name} to eval queue", + ) + else: + click.echo("aborting...") + + +if __name__ == "__main__": + main() diff --git a/cli/eval-cli.py b/cli/eval-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..6ad5084674dd4e71c39e1308e033a4348d218b43 --- /dev/null +++ b/cli/eval-cli.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python + +from huggingface_hub import snapshot_download + +from src.backend.envs import EVAL_REQUESTS_PATH_BACKEND +from src.backend.manage_requests import get_eval_requests +from src.backend.manage_requests import EvalRequest +from src.backend.run_eval_suite import run_evaluation + +from src.backend.tasks.xsum.task import XSum +from src.backend.tasks.xsum.task_v2 import XSumv2 + +from src.backend.tasks.cnndm.task import CNNDM +from src.backend.tasks.cnndm.task_v2 import CNNDMv2 + +from src.backend.tasks.selfcheckgpt.task import SelfCheckGPT + +from lm_eval.tasks import TaskManager +from lm_eval import tasks, evaluator, utils + +from src.backend.envs import Tasks, EVAL_REQUESTS_PATH_BACKEND, EVAL_RESULTS_PATH_BACKEND, DEVICE, LIMIT, Task +from src.envs import QUEUE_REPO + +from lm_eval.models.huggingface import HFLM + + +def main(): + # snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + + PENDING_STATUS = "PENDING" + RUNNING_STATUS = "RUNNING" + FINISHED_STATUS = "FINISHED" + FAILED_STATUS = "FAILED" + + status = [PENDING_STATUS, RUNNING_STATUS, FINISHED_STATUS, FAILED_STATUS] + + # Get all eval request that are FINISHED, if you want to run other evals, change this parameter + eval_requests: list[EvalRequest] = get_eval_requests(job_status=status, + hf_repo=QUEUE_REPO, + local_dir=EVAL_REQUESTS_PATH_BACKEND, + do_download=False) + # eval_request = [r for r in eval_requests if 'bloom-560m' in r.model][0] + eval_request = [r for r in eval_requests if 'meta-llama/Llama-2-7b-hf' in r.model][0] + + # my_task = Task("memo-trap", "acc", "memo-trap", 0) + # my_task = Task("selfcheckgpt", "avg-selfcheckgpt", "SGPT", 2) + # my_task = Task("ifeval", "prompt_level_strict_acc", "IFEval", 0) + # my_task = Task("truefalse_cieacf", "acc", "TrueFalse", 5) + # my_task = Task("faithdial_hallu", "acc", "FaithDIAL", 2) + + # my_task = Task("nq_swap", "exact_match", "NQ-Swap", 2) + # my_task = Task("memo-trap_v2", "acc", "XXX", 2) + my_task = Task("xsum_v2", "rougeL", "XXX", 0) + # my_task = Task("squadv2", "exact", "XXX", 0) + # my_task = Task("scrolls_qasper", "f1", "XXX", 0) + + eval_logger = utils.eval_logger + import logging + eval_logger.setLevel(getattr(logging, "DEBUG")) + + TASKS_HARNESS = [my_task] + # task_names = ['triviaqa'] + # TASKS_HARNESS = [task.value for task in Tasks] + + # include_task_folder("src/backend/tasks/") + task_manager = TaskManager(include_path="./src/backend/tasks/") + # task_manager.initialize_tasks(include_path="src/backend/tasks/") + + # breakpoint() + + print(task_manager.all_tasks) + + for task in TASKS_HARNESS: + print(f"Selected Tasks: [{task}]") + import torch + + # 
breakpoint() + results = evaluator.simple_evaluate(model="hf", model_args=eval_request.get_model_args(), tasks=[task.benchmark], num_fewshot=task.num_fewshot, + batch_size=1, device="mps", use_cache=None, limit=2, write_out=True, task_manager=task_manager) + print('AAA', results["results"]) + + breakpoint() + + +if __name__ == "__main__": + main() diff --git a/cli/fever-upload-cli.py b/cli/fever-upload-cli.py new file mode 100644 index 0000000000000000000000000000000000000000..b808f487df2aa8c06f7093cf2889af08077187d7 --- /dev/null +++ b/cli/fever-upload-cli.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 + +import glob +import os + +import random +from tqdm import tqdm + +from datasets import Dataset, DatasetDict, load_dataset + + +def convert(list_of_dicts): + res = {} + for d in list_of_dicts: + for k, v in d.items(): + res.setdefault(k, []).append(v) + return res + + +v10 = load_dataset("fever", "v1.0") +name_lst = ['train', 'labelled_dev'] + +old_to_new_label_map = { + 'SUPPORTS': 'supported', + 'REFUTES': 'refuted' +} + +data_map = {} + +for name in name_lst: + instance_lst = [] + + for entry in tqdm(v10[name]): + id_ = entry['id'] + label = entry['label'] + claim = entry['claim'] + + evidence_id = entry['evidence_id'] + evidence_wiki_url = entry['evidence_wiki_url'] + + if evidence_id != -1: + assert label in {'SUPPORTS', 'REFUTES'} + + instance = {'id': id_, 'label': old_to_new_label_map[label], 'claim': claim} + instance_lst.append(instance) + + key = 'dev' if name in {'labelled_dev'} else name + + instance_lst = sorted([dict(t) for t in {tuple(d.items()) for d in instance_lst}], key=lambda d: d['claim']) + + label_to_instance_lst = {} + for e in instance_lst: + if e['label'] not in label_to_instance_lst: + label_to_instance_lst[e['label']] = [] + label_to_instance_lst[e['label']].append(e) + + min_len = min(len(v) for k, v in label_to_instance_lst.items()) + + new_instance_lst = [] + for k in sorted(label_to_instance_lst.keys()): + new_instance_lst += label_to_instance_lst[k][:min_len] + + random.Random(42).shuffle(new_instance_lst) + data_map[key] = new_instance_lst + +ds_path = 'pminervini/hl-fever' + +task_to_ds_map = {k: Dataset.from_dict(convert(v)) for k, v in data_map.items()} +ds_dict = DatasetDict(task_to_ds_map) + +ds_dict.push_to_hub(ds_path, "v1.0") + +# breakpoint() diff --git a/cli/fix-requests-cli.py b/cli/fix-requests-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..5d765dd6dba17a80686b3f42fb5a66f83cc6330f --- /dev/null +++ b/cli/fix-requests-cli.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python + +import os +import fnmatch + +import json +from huggingface_hub import HfApi + + +def find_json_files(directory): + matches = [] + for root, dirnames, filenames in os.walk(directory): + for filename in fnmatch.filter(filenames, '*.json'): + matches.append(os.path.join(root, filename)) + return matches + + +directory_path = '/Users/pasquale/workspace/eval/requests' +json_files = find_json_files(directory_path) + +api = HfApi() +model_lst = api.list_models() + +model_lst = [m for m in model_lst] + +id_to_model = {m.id: m for m in model_lst} + +for path in json_files: + with open(path, 'r') as fr: + data = json.load(fr) + + model_id = data['model'] + if model_id in id_to_model: + model = id_to_model[model_id] + + to_overwrite = False + + is_finetuned = any(tag.startswith('base_model:') for tag in id_to_model[data['model']].tags) + + if is_finetuned: + data["model_type"] = "fine-tuned" + to_overwrite = True + + is_instruction_tuned = ('nstruct' in model_id) or 
('chat' in model_id) + if is_instruction_tuned: + data["model_type"] = "instruction-tuned" + to_overwrite = True + + if to_overwrite is True: + with open(path, 'w') as fw: + json.dump(data, fw) + + else: + print(f'Model {model_id} not found') diff --git a/cli/halueval-upload-cli.py b/cli/halueval-upload-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..914a93fe6947aed3de0ee4d4deeaca45957852ce --- /dev/null +++ b/cli/halueval-upload-cli.py @@ -0,0 +1,72 @@ +#!/usr/bin/env python3 + +import random +import requests + +from datasets import load_dataset, Dataset, DatasetDict + + +path = 'pminervini/HaluEval' + +API_URL = f"https://datasets-server.huggingface.co/splits?dataset={path}" +response = requests.get(API_URL) +res_json = response.json() + +gold_splits = {'dialogue', 'qa', 'summarization', 'general'} + +available_splits = {split['config'] for split in res_json['splits']} if 'splits' in res_json else set() + +name_to_ds = dict() + +for name in gold_splits: + ds = load_dataset("json", data_files={'data': f"data/{name}_data.json"}) + name_to_ds[name] = ds + # if name not in available_splits: + ds.push_to_hub(path, config_name=name) + + +def list_to_dict(lst: list) -> dict: + res = dict() + for entry in lst: + for k, v in entry.items(): + if k not in res: + res[k] = [] + res[k] += [v] + return res + + +for name in (gold_splits - {'general'}): + random.seed(42) + ds = name_to_ds[name] + new_entry_lst = [] + + for entry in ds['data']: + is_hallucinated = random.random() > 0.5 + new_entry = None + if name in {'qa'}: + new_entry = { + 'knowledge': entry['knowledge'], + 'question': entry['question'], + 'answer': entry[f'{"hallucinated" if is_hallucinated else "right"}_answer'], + 'hallucination': 'yes' if is_hallucinated else 'no' + } + if name in {'dialogue'}: + new_entry = { + 'knowledge': entry['knowledge'], + 'dialogue_history': entry['dialogue_history'], + 'response': entry[f'{"hallucinated" if is_hallucinated else "right"}_response'], + 'hallucination': 'yes' if is_hallucinated else 'no' + } + if name in {'summarization'}: + new_entry = { + 'document': entry['document'], + 'summary': entry[f'{"hallucinated" if is_hallucinated else "right"}_summary'], + 'hallucination': 'yes' if is_hallucinated else 'no' + } + assert new_entry is not None + new_entry_lst += [new_entry] + new_ds_map = list_to_dict(new_entry_lst) + new_ds = Dataset.from_dict(new_ds_map) + new_dsd = DatasetDict({'data': new_ds}) + + new_dsd.push_to_hub(path, config_name=f'{name}_samples') diff --git a/cli/isp-upload-cli.py b/cli/isp-upload-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..9142a5e8fa33c26e66a0b03f71a4187994b0af1c --- /dev/null +++ b/cli/isp-upload-cli.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 + +import glob +import os + +from datasets import load_dataset + +folder_path = 'isp-data-json/' # Replace with your folder path + +# Search for all .json files in the folder +json_files = glob.glob(os.path.join(folder_path, '*.jsonl')) + +path = 'pminervini/inverse-scaling' + +for json_path in json_files: + base_name = os.path.basename(json_path) + name = base_name.split("_")[0] + + ds = load_dataset("json", data_files={'data': json_path}) + ds.push_to_hub(path, config_name=name) diff --git a/cli/nqswap-upload-cli.py b/cli/nqswap-upload-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..3a7cc6b33075aa1ae9402956104ce492ee25c9fc --- /dev/null +++ b/cli/nqswap-upload-cli.py @@ -0,0 +1,12 @@ +#!/usr/bin/env python3 + +from datasets import 
load_dataset + +path = 'pminervini/NQ-Swap' + +ds = load_dataset("json", + data_files={ + 'original': 'nqswap/original.jsonl', + 'substituted': 'nqswap/substituted.jsonl' + }) +ds.push_to_hub(path) diff --git a/cli/shroom-upload-cli.py b/cli/shroom-upload-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..680cba9d0304007f2ee4f50165d5a92f49b246d3 --- /dev/null +++ b/cli/shroom-upload-cli.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 + +import json +from datasets import Dataset, DatasetDict + +file_path = "shroom-data/val.model-agnostic.json" +ds_path = 'pminervini/shroom' + +with open(file_path, 'r') as file: + data = json.load(file) + + +def convert(list_of_dicts): + dict_of_lists = {} + for d in list_of_dicts: + for key, value in d.items(): + dict_of_lists.setdefault(key, []).append(value) + return dict_of_lists + + +task_to_data_map = {} + +for entry in data: + task_name = entry["task"] + del entry["task"] + if task_name not in task_to_data_map: + task_to_data_map[task_name] = [] + task_to_data_map[task_name] += [entry] + +task_to_ds_map = {k: Dataset.from_dict(convert(data)) for k, data in task_to_data_map.items()} + +ds_dict = DatasetDict(task_to_ds_map) + +ds_dict.push_to_hub(ds_path) diff --git a/cli/submit-cli.py b/cli/submit-cli.py new file mode 100755 index 0000000000000000000000000000000000000000..eb0ac99c72df7916977f28b3b5729282253bc2d8 --- /dev/null +++ b/cli/submit-cli.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python + +import json +import os +import time + +from datetime import datetime, timezone + +from src.envs import API, EVAL_REQUESTS_PATH, H4_TOKEN, QUEUE_REPO +from src.submission.check_validity import already_submitted_models, get_model_size, is_model_on_hub + +from huggingface_hub import snapshot_download +from src.backend.envs import EVAL_REQUESTS_PATH_BACKEND +from src.backend.manage_requests import EvalRequest, get_eval_requests + + +def add_new_eval(model: str, base_model: str, revision: str, precision: str, private: bool, weight_type: str, model_type: str): + REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH) + + user_name = "" + model_path = model + if "/" in model: + tokens = model.split("/") + user_name = tokens[0] + model_path = tokens[1] + + precision = precision.split(" ")[0] + current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ") + + if model_type is None or model_type == "": + print("Please select a model type.") + return + + # Does the model actually exist? + if revision == "": + revision = "main" + + # Is the model on the hub? + if weight_type in ["Delta", "Adapter"]: + base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=H4_TOKEN, test_tokenizer=True) + if not base_model_on_hub: + print(f'Base model "{base_model}" {error}') + return + + if weight_type != "Adapter": + model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, test_tokenizer=True) + if not model_on_hub: + print(f'Model "{model}" {error}') + return + + # Is the model info correctly filled? + try: + model_info = API.model_info(repo_id=model, revision=revision) + except Exception: + print("Could not get your model information. 
Please fill it in properly.") + return + + model_size = get_model_size(model_info=model_info, precision=precision) + + license = 'none' + try: + license = model_info.cardData["license"] + except Exception: + print("Please select a license for your model") + # return + + # modelcard_OK, error_msg = check_model_card(model) + # if not modelcard_OK: + # print(error_msg) + # return + + # Seems good, creating the eval + print("Adding new eval") + + eval_entry = { + "model": model, + "base_model": base_model, + "revision": revision, + "private": private, + "precision": precision, + "weight_type": weight_type, + "status": "PENDING", + "submitted_time": current_time, + "model_type": model_type, + "likes": model_info.likes, + "params": model_size, + "license": license, + } + + # Check for duplicate submission + if f"{model}_{revision}_{precision}" in REQUESTED_MODELS: + print("This model has already been submitted.") + return + + print("Creating eval file") + OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}" + os.makedirs(OUT_DIR, exist_ok=True) + out_path = f"{OUT_DIR}/{model_path}_eval_request_{private}_{precision}_{weight_type}.json" + + with open(out_path, "w") as f: + f.write(json.dumps(eval_entry)) + + print("Uploading eval file") + API.upload_file(path_or_fileobj=out_path, path_in_repo=out_path.split("eval-queue/")[1], + repo_id=QUEUE_REPO, repo_type="dataset", commit_message=f"Add {model} to eval queue") + + # Remove the local file + os.remove(out_path) + + print("Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list.") + return + + +def main(): + from huggingface_hub import HfApi + + api = HfApi() + model_lst = api.list_models() + + model_lst = list(model_lst) + + def custom_filter(m) -> bool: + # res = m.pipeline_tag in {'text-generation'} and 'en' in m.tags and m.private is False + # res = m.pipeline_tag in {'text-generation'} and 'en' in m.tags and m.private is False and 'mistralai/' in m.id + res = 'mistralai/' in m.id + return res + + filtered_model_lst = sorted([m for m in model_lst if custom_filter(m)], key=lambda m: m.downloads, reverse=True) + + snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND, repo_type="dataset", max_workers=60) + + PENDING_STATUS = "PENDING" + RUNNING_STATUS = "RUNNING" + FINISHED_STATUS = "FINISHED" + FAILED_STATUS = "FAILED" + + status = [PENDING_STATUS, RUNNING_STATUS, FINISHED_STATUS, FAILED_STATUS] + + # Get all eval requests + eval_requests: list[EvalRequest] = get_eval_requests(job_status=status, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND) + + requested_model_names = {e.model for e in eval_requests} + + for i in range(min(200, len(filtered_model_lst))): + model = filtered_model_lst[i] + + print(f'Considering {model.id} ..') + + is_finetuned = any(tag.startswith('base_model:') for tag in model.tags) + + model_type = 'pretrained' + if is_finetuned: + model_type = "fine-tuned" + + is_instruction_tuned = 'nstruct' in model.id  # matches both "Instruct" and "instruct" + if is_instruction_tuned: + model_type = "instruction-tuned" + + if model.id not in requested_model_names: + + if 'mage' not in model.id: + add_new_eval(model=model.id, base_model='', revision='main', precision='float32', private=False, weight_type='Original', model_type=model_type) + time.sleep(10) + else: + print(f'Model {model.id} already added, not adding it to the queue again.') + + +if __name__ == "__main__": + main() diff --git a/cli/sync-open-llm-cli.py 
b/cli/sync-open-llm-cli.py new file mode 100644 index 0000000000000000000000000000000000000000..41322ca6a4598f7144eea5e52f3371fd8d2bed0a --- /dev/null +++ b/cli/sync-open-llm-cli.py @@ -0,0 +1,91 @@ +import os +import json +import glob +import time + +from tqdm import tqdm +from huggingface_hub import HfApi +from src.backend.envs import EVAL_REQUESTS_PATH_BACKEND_SYNC +from src.envs import QUEUE_REPO, API +from src.envs import EVAL_REQUESTS_PATH_OPEN_LLM, QUEUE_REPO_OPEN_LLM +from src.utils import my_snapshot_download + +def my_set_eval_request(api, json_filepath, hf_repo, local_dir): + # Retry the upload a few times, backing off for a minute between attempts + for i in range(10): + try: + set_eval_request(api=api, json_filepath=json_filepath, hf_repo=hf_repo, local_dir=local_dir) + return + except Exception: + time.sleep(60) + return + + +def set_eval_request(api: HfApi, json_filepath: str, hf_repo: str, local_dir: str): + """Re-serializes a locally updated eval request file and uploads it to the hub""" + + with open(json_filepath) as fp: + data = json.load(fp) + + with open(json_filepath, "w") as f: + f.write(json.dumps(data)) + + api.upload_file(path_or_fileobj=json_filepath, path_in_repo=json_filepath.replace(local_dir, ""), + repo_id=hf_repo, repo_type="dataset") + + +def get_request_file_for_model(data, requests_path): + """Selects the request file matching a given model name and precision""" + model_name = data["model"] + precision = data["precision"] + request_files = os.path.join( + requests_path, + f"{model_name}_eval_request_*.json", + ) + request_files = glob.glob(request_files) + + # Select correct request file (precision) + request_file = "" + request_files = sorted(request_files, reverse=True) + + for tmp_request_file in request_files: + with open(tmp_request_file, "r") as f: + req_content = json.load(f) + if req_content["precision"] == precision.split(".")[-1]: + request_file = tmp_request_file + return request_file + +def update_model_type(data, requests_path): + open_llm_request_file = get_request_file_for_model(data, requests_path) + + try: + with open(open_llm_request_file, "r") as f: + open_llm_request = json.load(f) + data["model_type"] = open_llm_request["model_type"] + return True, data + except Exception: + return False, data + + +def read_and_write_json_files(directory, requests_path_open_llm): + # Walk through the directory + for subdir, dirs, files in tqdm(os.walk(directory), desc="updating model type according to open llm leaderboard"): + for file in files: + # Check if the file is a JSON file + if file.endswith('.json'): + file_path = os.path.join(subdir, file) + # Open and read the JSON file + with open(file_path, 'r') as json_file: + data = json.load(json_file) + success, data = update_model_type(data, requests_path_open_llm) + if success: + with open(file_path, 'w') as json_file: + json.dump(data, json_file) + my_set_eval_request(api=API, json_filepath=file_path, hf_repo=QUEUE_REPO, local_dir=EVAL_REQUESTS_PATH_BACKEND_SYNC) + + + + +if __name__ == "__main__": + my_snapshot_download(repo_id=QUEUE_REPO_OPEN_LLM, revision="main", local_dir=EVAL_REQUESTS_PATH_OPEN_LLM, repo_type="dataset", max_workers=60) + my_snapshot_download(repo_id=QUEUE_REPO, revision="main", local_dir=EVAL_REQUESTS_PATH_BACKEND_SYNC, repo_type="dataset", max_workers=60) + read_and_write_json_files(EVAL_REQUESTS_PATH_BACKEND_SYNC, EVAL_REQUESTS_PATH_OPEN_LLM) \ No newline at end of file diff --git a/cli/truefalse-upload-cli.py b/cli/truefalse-upload-cli.py new file 
mode 100755 index 0000000000000000000000000000000000000000..98f75a2bb6863aaa9a95ef3adf112787225c12a3 --- /dev/null +++ b/cli/truefalse-upload-cli.py @@ -0,0 +1,15 @@ +#!/usr/bin/env python3 + +import glob +import os + +from datasets import load_dataset + +path = 'pminervini/true-false' +folder_path = 'true-false-data/' # Replace with your folder path + +# Search for all .csv files in the folder +csv_files = glob.glob(os.path.join(folder_path, '*.csv')) + +ds = load_dataset("csv", data_files={os.path.basename(csv_path).split("_")[0]: csv_path for csv_path in csv_files}) +ds.push_to_hub(path) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000000000000000000000000000000000..3b4737924b5a7d81c962a4e28b66ac6cdcc3b004 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,13 @@ +[tool.ruff] +# Enable pycodestyle (`E`) and Pyflakes (`F`) codes by default. +select = ["E", "F"] +ignore = ["E501"] # line too long (black is taking care of this) +line-length = 119 +fixable = ["A", "B", "C", "D", "E", "F", "G", "I", "N", "Q", "S", "T", "W", "ANN", "ARG", "BLE", "COM", "DJ", "DTZ", "EM", "ERA", "EXE", "FBT", "ICN", "INP", "ISC", "NPY", "PD", "PGH", "PIE", "PL", "PT", "PTH", "PYI", "RET", "RSE", "RUF", "SIM", "SLF", "TCH", "TID", "TRY", "UP", "YTT"] + +[tool.isort] +profile = "black" +line_length = 119 + +[tool.black] +line-length = 119 diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d7eb7fd8dd1123f2c42692ede6ba11adb4358c6 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,32 @@ +torch +colorama +APScheduler +black +click +datasets +gradio +gradio_client +huggingface-hub +matplotlib +numpy +pandas +plotly +python-dateutil +requests +semantic-version +tqdm +wandb +transformers>=4.36.0 +tokenizers>=0.15.0 +lm_eval[ifeval] @ git+https://github.com/EleutherAI/lm-evaluation-harness.git +accelerate +sentencepiece +langdetect +sacrebleu +cchardet +rouge_score +bert-score +evaluate +spacy +selfcheckgpt +immutabledict diff --git a/src/backend/envs.py b/src/backend/envs.py new file mode 100644 index 0000000000000000000000000000000000000000..89572dece4ae4da30f587792741535e2ac3ef845 --- /dev/null +++ b/src/backend/envs.py @@ -0,0 +1,67 @@ +import os + +import torch + +from dataclasses import dataclass +from enum import Enum + +from src.envs import CACHE_PATH + + +@dataclass +class Task: + benchmark: str + metric: str + col_name: str + num_fewshot: int + + +class Tasks(Enum): + task0 = Task("nq_open", "em", "NQ Open", 64) # 64, as in the ATLAS paper + task1 = Task("triviaqa", "em", "TriviaQA", 64) # 64, as in the ATLAS paper + + task11 = Task("nq8", "em", "NQ Open 8", 8) + task12 = Task("tqa8", "em", "TriviaQA 8", 8) + + # TruthfulQA is intended as a zero-shot benchmark [5, 47]. 
https://owainevans.github.io/pdfs/truthfulQA_lin_evans.pdf + task2 = Task("truthfulqa_gen", "rougeL_acc", "TruthfulQA Gen", 0) + task3 = Task("truthfulqa_mc1", "acc", "TruthfulQA MC1", 0) + task4 = Task("truthfulqa_mc2", "acc", "TruthfulQA MC2", 0) + + task5 = Task("halueval_qa", "acc", "HaluEval QA", 0) + task6 = Task("halueval_dialogue", "acc", "HaluEval Dialogue", 0) + task7 = Task("halueval_summarization", "acc", "HaluEval Summarization", 0) + + # task8 = Task("xsum", "rougeL", "XSum", 2) + # task9 = Task("cnndm", "rougeL", "CNN/DM", 2) + + task8_1 = Task("xsum_v2", "rougeL", "XSum", 0) + task9_1 = Task("cnndm_v2", "rougeL", "CNN/DM", 0) + + task10 = Task("memo-trap", "acc", "memo-trap", 0) + task10_2 = Task("memo-trap_v2", "acc", "memo-trap", 0) + + task13 = Task("ifeval", "prompt_level_strict_acc", "IFEval", 0) + + task14 = Task("selfcheckgpt", "max-selfcheckgpt", "SelfCheckGPT", 0) + + # task15 = Task("fever10", "acc", "FEVER", 16) + # task15_1 = Task("fever11", "acc", "FEVER", 8) + + task16 = Task("squadv2", "exact", "SQuADv2", 4) + + task17 = Task("truefalse_cieacf", "acc", "TrueFalse", 8) + + # task18 = Task("faithdial_hallu", "acc", "FaithDial", 8) + task19 = Task("faithdial_hallu_v2", "acc", "FaithDial", 8) + + task20 = Task("race", "acc", "RACE", 0) + + +EVAL_REQUESTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-queue-bk") +EVAL_REQUESTS_PATH_BACKEND_SYNC = os.path.join(CACHE_PATH, "eval-queue-bk-sync") +EVAL_RESULTS_PATH_BACKEND = os.path.join(CACHE_PATH, "eval-results-bk") + +DEVICE = "cuda" if torch.cuda.is_available() else "cpu" + +LIMIT = None # must be None for full runs; set to a small integer only when testing diff --git a/src/backend/huggingface_generate_until.py b/src/backend/huggingface_generate_until.py new file mode 100644 index 0000000000000000000000000000000000000000..c488f0219eae81b823a46a67de966819688ee1bb --- /dev/null +++ b/src/backend/huggingface_generate_until.py @@ -0,0 +1,57 @@ +from typing import List, Optional, Tuple +import torch +import transformers + +from lm_eval.models.huggingface import HFLM +from lm_eval.api.registry import register_model + +@register_model('hf-chat') +class HFLMwithChatTemplate(HFLM): + def __init__(self, use_chat_template=True, **kwargs): + super().__init__(**kwargs) + self.use_chat_template = use_chat_template + + def tok_batch_encode( + self, + strings: List[str], + padding_side: str = "left", + left_truncate_len: Optional[int] = None, + truncation: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor]: + + if self.use_chat_template: + try: + updated_strings = [] + for input_string in strings: + messages = [ + {"role": "user", "content": f"{input_string}"}, + ] + updated_string = self.tokenizer.apply_chat_template(messages, tokenize=False) + updated_strings.append(updated_string) + strings = updated_strings[:] + except Exception: + print(f"Failed to update input string with chat template: {self._model}") + # Encode a batch of strings; converts to tensors and pads automatically, unlike tok_encode. 
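+ # Left-padding keeps each prompt flush against its generated continuation for causal models; + # the tokenizer's original padding side is saved here and restored once encoding is done. 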
+ old_padding_side = self.tokenizer.padding_side + self.tokenizer.padding_side = padding_side + + if self.AUTO_MODEL_CLASS == transformers.AutoModelForCausalLM: + add_special_tokens = False + elif self.AUTO_MODEL_CLASS == transformers.AutoModelForSeq2SeqLM: + add_special_tokens = True + + encoding = self.tokenizer( + strings, + truncation=truncation, + padding="longest", + return_tensors="pt", + add_special_tokens=add_special_tokens, + ) + if left_truncate_len: + encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:] + encoding["attention_mask"] = encoding["attention_mask"][ + :, -left_truncate_len: + ] + self.tokenizer.padding_side = old_padding_side + + return encoding["input_ids"], encoding["attention_mask"] \ No newline at end of file diff --git a/src/backend/manage_requests.py b/src/backend/manage_requests.py new file mode 100644 index 0000000000000000000000000000000000000000..2dd302b2bd351f383ff556f664fe6d98a7841a64 --- /dev/null +++ b/src/backend/manage_requests.py @@ -0,0 +1,113 @@ +import glob +import json +from dataclasses import dataclass +from typing import Optional + +from huggingface_hub import HfApi + +from src.utils import my_snapshot_download + + +@dataclass +class EvalRequest: + model: str + private: bool + status: str + json_filepath: str + weight_type: str = "Original" + model_type: Optional[str] = None # pretrained, fine-tuned, instruction-tuned + precision: str = "" # float16, bfloat16 + base_model: Optional[str] = None # for adapter models + revision: str = "main" # commit + submitted_time: Optional[str] = "2022-05-18T11:40:22.519222" # random date just so that we can still order requests by date + likes: Optional[int] = 0 + params: Optional[int] = None + license: Optional[str] = "" + + def get_model_args(self) -> str: + model_args = f"pretrained={self.model},revision={self.revision},parallelize=True" # optionally: ,max_length=4096 + + if self.precision in ["float16", "float32", "bfloat16"]: + model_args += f",dtype={self.precision}" + # Quantized models need some added config, the install of bitsandbytes, etc + #elif self.precision == "8bit": + # model_args += ",load_in_8bit=True" + #elif self.precision == "4bit": + # model_args += ",load_in_4bit=True" + #elif self.precision == "GPTQ": + # A GPTQ model does not need dtype to be specified, + # it will be inferred from the config + else: + raise Exception(f"Unknown precision {self.precision}.") + return model_args + + +def set_eval_request(api: HfApi, eval_request: EvalRequest, set_to_status: str, hf_repo: str, local_dir: str): + """Updates a given eval request with its new status on the hub (running, completed, failed, ...)""" + json_filepath = eval_request.json_filepath + + with open(json_filepath) as fp: + data = json.load(fp) + + data["status"] = set_to_status + + with open(json_filepath, "w") as f: + f.write(json.dumps(data)) + + api.upload_file(path_or_fileobj=json_filepath, path_in_repo=json_filepath.replace(local_dir, ""), + repo_id=hf_repo, repo_type="dataset") + + +def get_eval_requests(job_status: list, local_dir: str, hf_repo: str, do_download: bool = True) -> list[EvalRequest]: + """Gets all evaluation requests whose status is in `job_status`. + + Returns: + `list[EvalRequest]`: a list of eval request objects. 
+ """ + if do_download: + my_snapshot_download(repo_id=hf_repo, revision="main", local_dir=local_dir, repo_type="dataset", max_workers=60) + + json_files = glob.glob(f"{local_dir}/**/*.json", recursive=True) + + eval_requests = [] + for json_filepath in json_files: + with open(json_filepath) as fp: + data = json.load(fp) + if data["status"] in job_status: + # import pdb + # breakpoint() + data["json_filepath"] = json_filepath + + if 'job_id' in data: + del data['job_id'] + + eval_request = EvalRequest(**data) + eval_requests.append(eval_request) + + return eval_requests + + +def check_completed_evals(api: HfApi, hf_repo: str, local_dir: str, checked_status: str, completed_status: str, + failed_status: str, hf_repo_results: str, local_dir_results: str): + """Checks if the currently running evals are completed, if yes, update their status on the hub.""" + my_snapshot_download(repo_id=hf_repo_results, revision="main", local_dir=local_dir_results, repo_type="dataset", max_workers=60) + + running_evals = get_eval_requests([checked_status], hf_repo=hf_repo, local_dir=local_dir) + + for eval_request in running_evals: + model = eval_request.model + print("====================================") + print(f"Checking {model}") + + output_path = model + output_file = f"{local_dir_results}/{output_path}/results*.json" + output_file_exists = len(glob.glob(output_file)) > 0 + + if output_file_exists: + print(f"EXISTS output file exists for {model} setting it to {completed_status}") + set_eval_request(api, eval_request, completed_status, hf_repo, local_dir) + + diff --git a/src/backend/moe_infinity.py b/src/backend/moe_infinity.py new file mode 100644 index 0000000000000000000000000000000000000000..8b94ca0b24305fe661fa479df85da038af3eded0 --- /dev/null +++ b/src/backend/moe_infinity.py @@ -0,0 +1,99 @@ +import torch +import os +from transformers import AutoTokenizer +import transformers +from moe_infinity import MoE +from typing import List, Tuple, Optional, Union + +from lm_eval.models.huggingface import HFLM +from lm_eval.api.registry import register_model + +@register_model('moe-infinity') +class MoEHFLM(HFLM): + def __init__( + self, + pretrained: str = "mistralai/Mixtral-8x7B-Instruct-v0.1", + moe_config: dict = None, + offload_path = os.path.expanduser('~'), + device_memory_ratio = 0.75, + use_chat_template=True, + *args, + **kwargs + ): + # Initialize parent class without calling _create_model in the parent's __init__ + self.checkpoint = pretrained + self.moe_config = moe_config if moe_config is not None else {} + self.offload_path = offload_path + self.device_memory_ratio = device_memory_ratio + self.use_chat_template = use_chat_template + super().__init__(*args, **kwargs, pretrained=pretrained) # Assuming HFLM accepts a 'pretrained' arg and handles it + # self._create_model() + + def _create_model(self, *args, **kwargs): + """ + Initializes the MoE model from MoE-infinity with the provided configuration. 
+ """ + # Ensure default configurations are set if not provided + default_moe_config = { + "offload_path": os.path.join(self.offload_path, "moe-infinity-offloads"), + "device_memory_ratio": self.device_memory_ratio, # Default value, adjust as necessary + } + # Update default config with any user-provided config + final_moe_config = {**default_moe_config, **self.moe_config} + self._model = MoE(self.checkpoint, final_moe_config) + + @property + def max_length(self): + if self._max_length: # if max length manually set, return it + return self._max_length + seqlen_config_attrs = ("n_positions", "max_position_embeddings", "n_ctx") + for attr in seqlen_config_attrs: + if hasattr(self.model.model.config, attr): + return getattr(self.model.model.config, attr) + if hasattr(self.tokenizer, "model_max_length"): + if self.tokenizer.model_max_length == 1000000000000000019884624838656: + return self._DEFAULT_MAX_LENGTH + return self.tokenizer.model_max_length + return self._DEFAULT_MAX_LENGTH + + def tok_batch_encode( + self, + strings: List[str], + padding_side: str = "left", + left_truncate_len: int = None, + truncation: bool = False, + ) -> Tuple[torch.Tensor, torch.Tensor]: + + if self.use_chat_template: + try: + updated_strings = [] + for input_string in strings: + messages = [ + {"role": "user", "content": f"{input_string}"}, + ] + updated_string = self.tokenizer.apply_chat_template(messages, tokenize=False) + updated_strings.append(updated_string) + strings = updated_strings[:] + except: + print(f"failed to update input string with chat template: {self._model}") + # encode a batch of strings. converts to tensors and pads automatically, unlike tok_encode. + old_padding_side = self.tokenizer.padding_side + self.tokenizer.padding_side = padding_side + + add_special_tokens = False + + encoding = self.tokenizer( + strings, + truncation=truncation, + padding="longest", + return_tensors="pt", + add_special_tokens=add_special_tokens, + ) + if left_truncate_len: + encoding["input_ids"] = encoding["input_ids"][:, -left_truncate_len:] + encoding["attention_mask"] = encoding["attention_mask"][ + :, -left_truncate_len: + ] + self.tokenizer.padding_side = old_padding_side + + return encoding["input_ids"], encoding["attention_mask"] diff --git a/src/backend/run_eval_suite.py b/src/backend/run_eval_suite.py new file mode 100644 index 0000000000000000000000000000000000000000..799fcc6111aa154dd171d755c0faaae649a5a3dd --- /dev/null +++ b/src/backend/run_eval_suite.py @@ -0,0 +1,62 @@ +from lm_eval import evaluator +from lm_eval.tasks import TaskManager + +from src.backend.manage_requests import EvalRequest + +from src.backend.tasks.xsum.task import XSum +from src.backend.tasks.xsum.task_v2 import XSumv2 + +from src.backend.tasks.cnndm.task import CNNDM +from src.backend.tasks.cnndm.task_v2 import CNNDMv2 + +from src.backend.tasks.selfcheckgpt.task import SelfCheckGPT + +from src.backend.huggingface_generate_until import HFLMwithChatTemplate +from src.backend.moe_infinity import MoEHFLM + +def run_evaluation(eval_request: EvalRequest, task_names, num_fewshot, batch_size, device, use_cache=None, limit=None, max_nb_samples=100) -> dict: + if limit: + print("WARNING: --limit SHOULD ONLY BE USED FOR TESTING. 
REAL METRICS SHOULD NOT BE COMPUTED USING LIMIT.") + + # include_task_folder("src/backend/tasks/") + # initialize_tasks('INFO') + + print(f"Allocating task manager for: {task_names}") + + task_manager = TaskManager(include_path="./src/backend/tasks/") + # task_manager.initialize_tasks('INFO') + + print(f"Considered Tasks: {task_names}") + # print(f"Allowed Tasks: {tasks.ALL_TASKS}") + + # task_names = utils.pattern_match(task_names, tasks.ALL_TASKS) + + print(f"Selected Tasks: {task_names}") + print(f"Eval Request: {eval_request.get_model_args()}") + # hf-chat is implemented to use apply_chat_template + results = evaluator.simple_evaluate(model="moe-infinity", # alternatives: "hf", "hf-chat" + model_args=eval_request.get_model_args(), + tasks=task_names, + num_fewshot=num_fewshot, + batch_size=batch_size, + max_batch_size=8, + device=device, + use_cache=use_cache, + limit=limit, + write_out=True, + task_manager=task_manager) + + results["config"]["model_dtype"] = eval_request.precision + results["config"]["model_name"] = eval_request.model + results["config"]["model_sha"] = eval_request.revision + + if max_nb_samples is not None: + if 'samples' in results: + samples = results['samples'] + for task_name in samples.keys(): + if len(samples[task_name]) > max_nb_samples: + results['samples'][task_name] = results['samples'][task_name][:max_nb_samples] + + # print(evaluator.make_table(results)) + + return results diff --git a/src/backend/sort_queue.py b/src/backend/sort_queue.py new file mode 100644 index 0000000000000000000000000000000000000000..4a7a957ca63e094ed4f76aea449668442e21c99a --- /dev/null +++ b/src/backend/sort_queue.py @@ -0,0 +1,28 @@ +from dataclasses import dataclass +from huggingface_hub import HfApi +from src.backend.manage_requests import EvalRequest + + +@dataclass +class ModelMetadata: + likes: int = 0 + size: int = 15 + + +def sort_models_by_priority(api: HfApi, models: list[EvalRequest]) -> list[EvalRequest]: + private_models = [model for model in models if model.private] + public_models = [model for model in models if not model.private] + + return sort_by_submit_date(private_models) + sort_by_submit_date(public_models) + + +def sort_by_submit_date(eval_requests: list[EvalRequest]) -> list[EvalRequest]: + return sorted(eval_requests, key=lambda x: x.submitted_time, reverse=False) + + +def sort_by_size(eval_requests: list[EvalRequest]) -> list[EvalRequest]: + return sorted(eval_requests, key=lambda x: x.params, reverse=False) + + +def sort_by_likes(eval_requests: list[EvalRequest]) -> list[EvalRequest]: + return sorted(eval_requests, key=lambda x: x.likes, reverse=False) diff --git a/src/backend/tasks/__init__.py b/src/backend/tasks/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb7017ca3330d6efcef7887d07b5c02b48b85114 --- /dev/null +++ b/src/backend/tasks/__init__.py @@ -0,0 +1,7 @@ +from src.backend.tasks.xsum.task import XSum +from src.backend.tasks.xsum.task_v2 import XSumv2 + +from src.backend.tasks.cnndm.task import CNNDM +from src.backend.tasks.cnndm.task_v2 import CNNDMv2 + +from src.backend.tasks.selfcheckgpt.task import SelfCheckGPT diff --git a/src/backend/tasks/cnndm/README.md b/src/backend/tasks/cnndm/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bad0c4e2d80ec17c3f4a4c2f15db2ce6a6632db4 --- /dev/null +++ b/src/backend/tasks/cnndm/README.md @@ -0,0 +1,54 @@ +# CNN/DailyMail + +### Paper + +Title: `Get To The Point: Summarization with Pointer-Generator Networks` +Abstract: https://arxiv.org/abs/1704.04368 + +CNN/DailyMail is an abstractive summarization dataset pairing news articles from CNN and the Daily Mail +with the multi-sentence "highlights" written by the original editors. The corpus was first assembled for +reading comprehension by Hermann et al. (2015) and later repurposed for summarization; version 3.0.0, used here, +is the non-anonymized variant, in which models must generate the highlights given the full article text. + +Homepage: https://huggingface.co/datasets/cnn_dailymail + + +### Citation + +``` +@misc{see2017point, + title={Get To The Point: Summarization with Pointer-Generator Networks}, + author={Abigail See and Peter J. Liu and Christopher D. Manning}, + year={2017}, + eprint={1704.04368}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +* `cnndm`: few-shot summarization with a `Document: ... Summary:` prompt +* `cnndm_v2`: zero-shot variant using an instruction-style prompt (`Summarize the article.`) + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/src/backend/tasks/cnndm/cnndm.yaml b/src/backend/tasks/cnndm/cnndm.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3299a127c3dd19e884f0ec6cab0baf469e8b2f70 --- /dev/null +++ b/src/backend/tasks/cnndm/cnndm.yaml @@ -0,0 +1,2 @@ +task: cnndm +class: !function task.CNNDM diff --git a/src/backend/tasks/cnndm/cnndm_v2.yaml b/src/backend/tasks/cnndm/cnndm_v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3c397b056f47cf0b286140c439bdf9752cc41b13 --- /dev/null +++ b/src/backend/tasks/cnndm/cnndm_v2.yaml @@ -0,0 +1,2 @@ +task: cnndm_v2 +class: !function task_v2.CNNDMv2 diff --git a/src/backend/tasks/cnndm/task.py b/src/backend/tasks/cnndm/task.py new file mode 100644 index 0000000000000000000000000000000000000000..f48fadcb060c63182d8ad0ff58d4c1d1c14a80cc --- /dev/null +++ b/src/backend/tasks/cnndm/task.py @@ -0,0 +1,194 @@ +from lm_eval.api.task import ConfigurableTask +from lm_eval.api.instance import Instance +# from lm_eval.api.registry import register_task +from lm_eval.api.metrics import mean + +import torch +import sacrebleu +from rouge_score import rouge_scorer, scoring + + +def bleu(refs, preds): + """ + Returns `t5` style BLEU scores. See the related implementation: + https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L41 + + :param refs: + A `list` of `list` of reference `str`s. + :param preds: + A `list` of predicted `str`s. 
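+ :return: A `float`, the corpus-level sacreBLEU score (0 to 100). 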
+ """ + score = sacrebleu.corpus_bleu( + preds, + refs, + smooth_method="exp", + smooth_value=0.0, + force=False, + lowercase=False, + tokenize="intl", + use_effective_order=False, + ).score + return score + + +def rouge(refs, preds): + """ + Returns `t5` style ROUGE scores. See the related implementation: + https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L68 + + :param refs: + A `list` of reference `strs`. + :param preds: + A `list` of predicted `strs`. + """ + rouge_types = ["rouge1", "rouge2", "rougeLsum"] + scorer = rouge_scorer.RougeScorer(rouge_types) + # Add newlines between sentences to correctly compute `rougeLsum`. + + def _prepare_summary(summary): + summary = summary.replace(" . ", ".\n") + return summary + + # Accumulate confidence intervals. + aggregator = scoring.BootstrapAggregator() + for ref, pred in zip(refs, preds): + ref = _prepare_summary(ref) + pred = _prepare_summary(pred) + aggregator.add_scores(scorer.score(ref, pred)) + result = aggregator.aggregate() + return {type: result[type].mid.fmeasure * 100 for type in rouge_types} + + +# @register_task("cnndm") +class CNNDM(ConfigurableTask): + VERSION = 0 + DATASET_PATH = "cnn_dailymail" + DATASET_NAME = "3.0.0" + + def __init__(self): + super().__init__(config={'metadata': {'version': self.VERSION}}) + self.factkb_tokenizer = None + self.factkb_model = None + self.bert_score = None + + def maybe_init_factkb(self): + if self.factkb_tokenizer is None or self.factkb_model is None: + from transformers import AutoTokenizer, AutoModelForSequenceClassification + self.factkb_tokenizer = AutoTokenizer.from_pretrained("roberta-base", padding="max_length", truncation=True) + self.factkb_model = AutoModelForSequenceClassification.from_pretrained("bunsenfeng/FactKB", num_labels=2, device_map="auto") + + def maybe_init_bertscore(self): + if self.bert_score is None: + from evaluate import load + self.bert_score = load("bertscore") + + def has_training_docs(self): + return True + + def has_validation_docs(self): + return True + + def has_test_docs(self): + return True + + def training_docs(self): + return self.dataset["train"] + + def validation_docs(self): + return self.dataset["validation"] + + def test_docs(self): + return self.dataset["test"] + + def doc_to_text(self, doc): + return f'Document: {doc["article"]}\nSummary:' + + @staticmethod + def should_decontaminate(): + return True + + def doc_to_decontamination_query(self, doc): + return doc["article"] + + def doc_to_target(self, doc): + return doc["highlights"] + + def construct_requests(self, doc, ctx, **kwargs): + """Uses RequestFactory to construct Requests and returns an iterable of + Requests which will be sent to the LM. + + :param doc: + The document as returned from training_docs, validation_docs, or test_docs. + :param ctx: str + The context string, generated by fewshot_context. This includes the natural + language description, as well as the few shot examples, and the question + part of the document for `doc`. 
+ """ + + return [ + Instance( + request_type="generate_until", + doc=doc, + arguments=(ctx, {"until": ["\n"]}), + idx=0, + **kwargs + ) + ] + + def process_results(self, doc, results): + completion = results[0] + # true_refs, false_refs = doc["correct_answers"], doc["incorrect_answers"] + # all_refs = true_refs + false_refs + + document = doc["article"] + gold_summary = doc["highlights"] + + true_refs = [doc["highlights"]] + all_refs = true_refs + + # ROUGE-N + rouge_scores = [rouge([ref], [completion]) for ref in all_refs] + # ROUGE-1 + rouge1_scores = [score["rouge1"] for score in rouge_scores] + # ROUGE-2 + rouge2_scores = [score["rouge2"] for score in rouge_scores] + # ROUGE-L + rougeL_scores = [score["rougeLsum"] for score in rouge_scores] + + self.maybe_init_factkb() + input_factkb = [[completion, document]] + factkb_tokens = self.factkb_tokenizer(input_factkb, return_tensors="pt", padding="max_length", truncation=True).to(self.factkb_model.device) + factkb_logits = self.factkb_model(**factkb_tokens).logits + factkb_res = torch.softmax(factkb_logits, dim=1) + + self.maybe_init_bertscore() + bert_score_res = self.bert_score.compute(predictions=[completion], references=[gold_summary], model_type="microsoft/deberta-xlarge-mnli", lang="en") + + res = { + "rouge1": rouge1_scores[0], + "rouge2": rouge2_scores[0], + "rougeL": rougeL_scores[0], + "factKB": float(factkb_res[0][1]), + "bertscore_precision": float(bert_score_res["precision"][0]), + "bertscore_recall": float(bert_score_res["recall"][0]), + "bertscore_f1": float(bert_score_res["f1"][0]) + } + + return res + + def aggregation(self): + """ + :returns: {str: [float] -> float} + A dictionary where keys are the names of submetrics and values are + functions that aggregate a list of metrics + """ + return {k: mean for k in ["rouge1", "rouge2", "rougeL", "factKB", "bertscore_precision", "bertscore_recall", "bertscore_f1"]} + + def higher_is_better(self): + """ + :returns: {str: bool} + A dictionary where keys are the names of submetrics and values are + whether a higher value of the submetric is better + """ + return {k: True for k in ["rouge1", "rouge2", "rougeL", "factKB", "bertscore_precision", "bertscore_recall", "bertscore_f1"]} + diff --git a/src/backend/tasks/cnndm/task_v2.py b/src/backend/tasks/cnndm/task_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..528384d1bbc1a46e4d8250f82de69d3678ec645e --- /dev/null +++ b/src/backend/tasks/cnndm/task_v2.py @@ -0,0 +1,203 @@ +from lm_eval.api.task import ConfigurableTask +from lm_eval.api.instance import Instance +# from lm_eval.api.registry import register_task +from lm_eval.api.metrics import mean + +import torch +import sacrebleu +from rouge_score import rouge_scorer, scoring + + +def bleu(refs, preds): + """ + Returns `t5` style BLEU scores. See the related implementation: + https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L41 + + :param refs: + A `list` of `list` of reference `str`s. + :param preds: + A `list` of predicted `str`s. + """ + score = sacrebleu.corpus_bleu( + preds, + refs, + smooth_method="exp", + smooth_value=0.0, + force=False, + lowercase=False, + tokenize="intl", + use_effective_order=False, + ).score + return score + + +def rouge(refs, preds): + """ + Returns `t5` style ROUGE scores. 
See the related implementation: + https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L68 + + :param refs: + A `list` of reference `strs`. + :param preds: + A `list` of predicted `strs`. + """ + rouge_types = ["rouge1", "rouge2", "rougeLsum"] + scorer = rouge_scorer.RougeScorer(rouge_types) + # Add newlines between sentences to correctly compute `rougeLsum`. + + def _prepare_summary(summary): + summary = summary.replace(" . ", ".\n") + return summary + + # Accumulate confidence intervals. + aggregator = scoring.BootstrapAggregator() + for ref, pred in zip(refs, preds): + ref = _prepare_summary(ref) + pred = _prepare_summary(pred) + aggregator.add_scores(scorer.score(ref, pred)) + result = aggregator.aggregate() + return {type: result[type].mid.fmeasure * 100 for type in rouge_types} + + +# @register_task("cnndm_v2") +class CNNDMv2(ConfigurableTask): + VERSION = 2 + DATASET_PATH = "cnn_dailymail" + DATASET_NAME = "3.0.0" + + def __init__(self): + super().__init__(config={'metadata': {'version': self.VERSION}, + 'generation_kwargs': {'do_sample': False, 'temperature': 0.0, 'until': ['\n', '\n\n']}}) + self.factkb_tokenizer = None + self.factkb_model = None + self.bert_score = None + + def maybe_init_factkb(self): + if self.factkb_tokenizer is None or self.factkb_model is None: + from transformers import AutoTokenizer, AutoModelForSequenceClassification + self.factkb_tokenizer = AutoTokenizer.from_pretrained("roberta-base", padding="max_length", truncation=True) + self.factkb_model = AutoModelForSequenceClassification.from_pretrained("bunsenfeng/FactKB", num_labels=2, device_map="auto") + + def maybe_init_bertscore(self): + if self.bert_score is None: + from evaluate import load + self.bert_score = load("bertscore") + + def has_training_docs(self): + return True + + def has_validation_docs(self): + return True + + def has_test_docs(self): + return True + + def training_docs(self): + return self.dataset["train"] + + def validation_docs(self): + return self.dataset["validation"] + + def test_docs(self): + return self.dataset["test"] + + # def custom_prompt(self): + # res = "Provide a summary of the provided article." + # return res + + # def fewshot_delimiter(self): + # return "\n\n" + + # From https://arxiv.org/abs/2305.14739 + def doc_to_text(self, doc): + return f'Article: {doc["article"]}\nSummarize the article. Summary:' + + @staticmethod + def should_decontaminate(): + return True + + def doc_to_decontamination_query(self, doc): + return doc["article"] + + def doc_to_target(self, doc): + return doc["highlights"] + + def construct_requests(self, doc, ctx, **kwargs): + """Uses RequestFactory to construct Requests and returns an iterable of + Requests which will be sent to the LM. + + :param doc: + The document as returned from training_docs, validation_docs, or test_docs. + :param ctx: str + The context string, generated by fewshot_context. This includes the natural + language description, as well as the few shot examples, and the question + part of the document for `doc`. 
+ """ + + return [ + Instance( + request_type="generate_until", + doc=doc, + arguments=(ctx, {"until": ["\n"]}), + idx=0, + **kwargs + ) + ] + + def process_results(self, doc, results): + completion = results[0] + # true_refs, false_refs = doc["correct_answers"], doc["incorrect_answers"] + # all_refs = true_refs + false_refs + + document = doc["article"] + gold_summary = doc["highlights"] + + true_refs = [doc["highlights"]] + all_refs = true_refs + + # ROUGE-N + rouge_scores = [rouge([ref], [completion]) for ref in all_refs] + # ROUGE-1 + rouge1_scores = [score["rouge1"] for score in rouge_scores] + # ROUGE-2 + rouge2_scores = [score["rouge2"] for score in rouge_scores] + # ROUGE-L + rougeL_scores = [score["rougeLsum"] for score in rouge_scores] + + self.maybe_init_factkb() + input_factkb = [[completion, document]] + factkb_tokens = self.factkb_tokenizer(input_factkb, return_tensors="pt", padding="max_length", truncation=True).to(self.factkb_model.device) + factkb_logits = self.factkb_model(**factkb_tokens).logits + factkb_res = torch.softmax(factkb_logits, dim=1) + + self.maybe_init_bertscore() + bert_score_res = self.bert_score.compute(predictions=[completion], references=[gold_summary], model_type="microsoft/deberta-xlarge-mnli", lang="en") + + res = { + "rouge1": rouge1_scores[0], + "rouge2": rouge2_scores[0], + "rougeL": rougeL_scores[0], + "factKB": float(factkb_res[0][1]), + "bertscore_precision": float(bert_score_res["precision"][0]), + "bertscore_recall": float(bert_score_res["recall"][0]), + "bertscore_f1": float(bert_score_res["f1"][0]) + } + + return res + + def aggregation(self): + """ + :returns: {str: [float] -> float} + A dictionary where keys are the names of submetrics and values are + functions that aggregate a list of metrics + """ + return {k: mean for k in ["rouge1", "rouge2", "rougeL", "factKB", "bertscore_precision", "bertscore_recall", "bertscore_f1"]} + + def higher_is_better(self): + """ + :returns: {str: bool} + A dictionary where keys are the names of submetrics and values are + whether a higher value of the submetric is better + """ + return {k: True for k in ["rouge1", "rouge2", "rougeL", "factKB", "bertscore_precision", "bertscore_recall", "bertscore_f1"]} + diff --git a/src/backend/tasks/faithdial/faithdial.yaml b/src/backend/tasks/faithdial/faithdial.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3f993ddd2c176aace94207b022d0387f7a8f910b --- /dev/null +++ b/src/backend/tasks/faithdial/faithdial.yaml @@ -0,0 +1,14 @@ +task: faithdial_hallu +dataset_path: McGill-NLP/FaithDial +training_split: train +validation_split: validation +test_split: test +output_type: multiple_choice +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +doc_to_choice: ["false", "true"] +metric_list: + - metric: acc + higher_is_better: True +metadata: + version: 0.0 diff --git a/src/backend/tasks/faithdial/faithdial_v2.yaml b/src/backend/tasks/faithdial/faithdial_v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0356d0f92c1fd8b77446fd22434880bd060384a3 --- /dev/null +++ b/src/backend/tasks/faithdial/faithdial_v2.yaml @@ -0,0 +1,14 @@ +task: faithdial_hallu_v2 +dataset_path: McGill-NLP/FaithDial +training_split: train +validation_split: validation +test_split: test +output_type: multiple_choice +doc_to_text: !function utils.doc_to_text_v2 +doc_to_target: !function utils.doc_to_target +doc_to_choice: ["false", "true"] +metric_list: + - metric: acc + higher_is_better: True +metadata: + version: 0.0 
diff --git a/src/backend/tasks/faithdial/utils.py b/src/backend/tasks/faithdial/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..10300115f942fb760816b6292222c86acd4b48a6 --- /dev/null +++ b/src/backend/tasks/faithdial/utils.py @@ -0,0 +1,19 @@ +from typing import List, Union +ValueType = Union[str, List[str]] + + +def doc_to_text(doc: dict[str, ValueType]) -> str: + history_str = " ".join([f'[{"Human" if i % 2 == 0 else "Assistant"}] {m}' for i, m in enumerate(doc['history'])]) + doc_text = f'#Knowledge#: {doc["knowledge"]}\n#Dialogue History#: {history_str}\n#Response#: {doc["response"]}\n#Hallucinated#:' + return doc_text + + +def doc_to_text_v2(doc: dict[str, ValueType]) -> str: + history_str = " ".join([f'[{"Human" if i % 2 == 0 else "Assistant"}] {m}' for i, m in enumerate(doc['history'])]) + doc_text = f'#Knowledge#: {doc["knowledge"]}\n#Dialogue History#: {history_str}\n#Response#: {doc["original_response"]}\n#Hallucinated#:' + return doc_text + + +def doc_to_target(doc: dict[str, ValueType]) -> str: + res = "true" if "Hallucination" in doc["BEGIN"] else "false" + return res diff --git a/src/backend/tasks/fever/fever10.yaml b/src/backend/tasks/fever/fever10.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2649b5194f095c500e2671eb2dc1483c862c5f81 --- /dev/null +++ b/src/backend/tasks/fever/fever10.yaml @@ -0,0 +1,16 @@ +task: fever10 +dataset_path: fever +dataset_name: v1.0 +output_type: multiple_choice +training_split: train +validation_split: labelled_dev +test_split: null +doc_to_text: "Claim: {{claim}}\nLabel:" +doc_to_choice: ["SUPPORTS", "REFUTES", "NOT ENOUGH INFO"] +doc_to_target: label +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/src/backend/tasks/fever/fever11.yaml b/src/backend/tasks/fever/fever11.yaml new file mode 100644 index 0000000000000000000000000000000000000000..46a5c8b687c7d2af63250a7343a0abdbdbcb4559 --- /dev/null +++ b/src/backend/tasks/fever/fever11.yaml @@ -0,0 +1,16 @@ +task: fever11 +dataset_path: pminervini/hl-fever +dataset_name: v1.0 +output_type: multiple_choice +training_split: train +validation_split: dev +test_split: null +doc_to_text: "Claim: {{claim}}\nLabel:" +doc_to_choice: ["supported", "refuted"] +doc_to_target: label +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/src/backend/tasks/halueval/halueval_dialogue.yaml b/src/backend/tasks/halueval/halueval_dialogue.yaml new file mode 100644 index 0000000000000000000000000000000000000000..22c1ff3ceafec96aa191d343a2ba363e42440447 --- /dev/null +++ b/src/backend/tasks/halueval/halueval_dialogue.yaml @@ -0,0 +1,29 @@ +task: halueval_dialogue +dataset_path: pminervini/HaluEval +dataset_name: dialogue_samples +output_type: generate_until +training_split: null +validation_split: null +test_split: data +num_fewshot: 0 +doc_to_text: !function utils.doc_to_text_dialogue +doc_to_target: !function utils.doc_to_target +process_results: !function utils.process_results +generation_kwargs: + until: + - "\n" + - "." 
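+ # greedy decoding: sampling is disabled so the Yes/No judgement is deterministic 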
+ do_sample: false + temperature: 0.0 +metric_list: + - metric: em + aggregation: mean + higher_is_better: true + - metric: correctness + aggregation: mean + higher_is_better: true + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/src/backend/tasks/halueval/halueval_qa.yaml b/src/backend/tasks/halueval/halueval_qa.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b1e3316047c2d81b0978fca10b6cdd8c8bc9b651 --- /dev/null +++ b/src/backend/tasks/halueval/halueval_qa.yaml @@ -0,0 +1,29 @@ +task: halueval_qa +dataset_path: pminervini/HaluEval +dataset_name: qa_samples +output_type: generate_until +training_split: null +validation_split: null +test_split: data +num_fewshot: 0 +doc_to_text: !function utils.doc_to_text_qa +doc_to_target: !function utils.doc_to_target +process_results: !function utils.process_results +generation_kwargs: + until: + - "\n" + - "." + do_sample: false + temperature: 0.0 +metric_list: + - metric: em + aggregation: mean + higher_is_better: true + - metric: correctness + aggregation: mean + higher_is_better: true + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/src/backend/tasks/halueval/halueval_summarization.yaml b/src/backend/tasks/halueval/halueval_summarization.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c4853588127e7bb45d07fcc96e8039ada88e3e4 --- /dev/null +++ b/src/backend/tasks/halueval/halueval_summarization.yaml @@ -0,0 +1,29 @@ +task: halueval_summarization +dataset_path: pminervini/HaluEval +dataset_name: summarization_samples +output_type: generate_until +training_split: null +validation_split: null +test_split: data +num_fewshot: 0 +doc_to_text: !function utils.doc_to_text_summarization +doc_to_target: !function utils.doc_to_target +process_results: !function utils.process_results +generation_kwargs: + until: + - "\n" + - "." + do_sample: false + temperature: 0.0 +metric_list: + - metric: em + aggregation: mean + higher_is_better: true + - metric: correctness + aggregation: mean + higher_is_better: true + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 diff --git a/src/backend/tasks/halueval/utils.py b/src/backend/tasks/halueval/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..f3740567cc1e88e3171b33116e387bcb56aab0ad --- /dev/null +++ b/src/backend/tasks/halueval/utils.py @@ -0,0 +1,136 @@ +# Main reference: https://github.com/RUCAIBox/HaluEval/blob/main/evaluation/evaluate.py + +QA_INSTRUCTIONS = """I want you act as an answer judge. Given a question and an answer, your objective is to determine if the provided answer contains non-factual or hallucinated information. You SHOULD give your judgement based on the following hallucination types and the world knowledge. + +You are trying to determine if the answer misunderstands the question context and intention. +#Question#: What is a rare breed of dog that was derived as a variant of Rat Terrier, Shiloh Shepherd dog or American Hairless Terrier? +#Answer#: American Hairless Terrier +#Your Judgement#: No + +You are trying to determine if there is a factual contradiction between the answer and the world knowledge. Some information in the answer might be fabricated. +#Question#: Are the New Orleans Outfall Canals the same length as the Augusta Canal? +#Answer#: No, the New Orleans Outfall Canals and the Augusta Canal are not the same length. 
The Orleans Canal is approximately 3.6 miles (5.8 kilometers) long while the Augusta Canal is approximately 7 miles (11.3 kilometers) long. +#Your Judgement#: Yes +#Question#: What U.S Highway gives access to Zilpo Road, and is also known as Midland Trail? +#Answer#: U.S Highway 70 +#Your Judgement#: Yes + +You are trying to determine if the answer is too general or too specific to answer the question at an appropriate level of specificity. +#Question#: What genre do Superheaven and Oceansize belong to? +#Answer#: Superheaven and Oceansize belong to the rock genre. +#Your Judgement#: No +#Question#: What profession do Kōbō Abe and Agatha Christie share? +#Answer#: Playwright. +#Your Judgement#: No + +You are trying to determine if the answer can be correctly inferred from the knowledge. +#Question#: Which band has more members, Muse or The Raconteurs? +#Answer#: Muse has more members than The Raconteurs. +#Your Judgement#: Yes +#Question#: Which is currently more valuable, Temagami-Lorrain Mine or Meadowbank Gold Mine? +#Answer#: Meadowbank Gold Mine, since Meadowbank Gold Mine is still producing gold and the TemagamiLorrain Mine has been inactive for years. +#Your Judgement#: No + +You should try your best to determine if the answer contains non-factual or hallucinated information according to the above hallucination types. The answer you give MUST be \"Yes\" or \"No\"".""" + +DIALOGUE_INSTRUCTIONS = """I want you act as a response judge. Given a dialogue history and a response, your objective is to determine if the provided response contains non-factual or hallucinated information. You SHOULD give your judgement based on the following hallucination types and the world knowledge. + +You are trying to determine if the true entity in the response is replaced with a highly similar entity. +#Dialogue History#: [Human]: Could you recommand movies similar to The Dark Knight? [Assistant]: The sequel to Batman Begins is The Dark Knight. [Human]: Okay. Who is the director of The Dark Knight and any other movies from him not related to Batman? +#Response#: Christopher Nolan was the director. He also directed insomnia and inception. +#Your Judgement#: No +#Dialogue History#: [Human]: Could you recommand movies similar to The Dark Knight? [Assistant]: The sequel to Batman Begins is The Dark Knight. [Human]: Okay. Who is the director of The Dark Knight and any other movies from him not related to Batman? +#Response#: Steven Spielberg was the director. He also directed insomnia and inception. +#Your Judgement#: Yes + +You are trying to determine if the true entity in the response is replaced with a dissimilar entity. +#Dialogue History#: [Human]: Could you recommand movies similar to The Dark Knight? [Assistant]: The sequel to Batman Begins is The Dark Knight. [Human]: Okay. Who is the director of The Dark Knight and any other movies from him not related to Batman? +#Response#: Christopher Nolan was the director. He also directed insomnia and inception. +#Your Judgement#: No +#Dialogue History#: [Human]: Could you recommand movies similar to The Dark Knight? [Assistant]: The sequel to Batman Begins is The Dark Knight. [Human]: Okay. Who is the director of The Dark Knight and any other movies from him not related to Batman? +#Response#: Batman Begins was the director. He also directed insomnia and inception. +#Your Judgement#: Yes + +You are trying to determine if the true entity in the response is replaced with a dissimilar entity in a different entity type. 
+#Dialogue History#: [Human]: Could you recommand movies similar to The Dark Knight? [Assistant]: The sequel to Batman Begins is The Dark Knight. [Human]: Okay. Who is the director of The Dark Knight and any other movies from him not related to Batman? +#Response#: Christopher Nolan was the director. He also directed insomnia and inception. +#Your Judgement#: No +#Dialogue History#: [Human]: Could you recommand movies similar to The Dark Knight? [Assistant]: The sequel to Batman Begins is The Dark Knight. [Human]: Okay. Who is the director of The Dark Knight and any other movies from him not related to Batman? +#Response#: United States of America was the director. He also directed insomnia and inception. +#Your Judgement#: Yes + +You should try your best to determine if the response contains non-factual or hallucinated information according to the above hallucination types. The answer you give MUST be \"Yes\" or \"No\"".""" + +SUMMARIZATION_INSTRUCTIONS = """I want you act as a summary judge. Given a document and a summary, your objective is to determine if the provided summary contains non-factual or hallucinated information. You SHOULD give your judgement based on the following hallucination types and the world knowledge. + +You are trying to determine if the summary is factual but some information cannot be directly inferred or entailed from the document. +#Document#: The panther chameleon was found on Monday by a dog walker in the wooded area at Marl Park. It had to be put down after X-rays showed all of its legs were broken and it had a deformed spine. RSPCA Cymru said it was an "extremely sad example of an abandoned and neglected exotic pet". Inspector Selina Chan said: "It is a possibility that the owners took on this animal but were unable to provide the care he needs and decided to release him to the wild. "We are urging potential owners of exotic animals to thoroughly research what is required in the care of the particular species before taking one on. "Potential owners need to make sure they can give their animal the environment it needs and they have the facilities, time, financial means and long-term commitment to maintain a good standard of care, as required under the Animal Welfare Act 2006." She added it was illegal to release non-native species into the wild. +#Summary#: A chameleon that was found in a Cardiff park has been put down after being abandoned and neglected by its owners. +#Your Judgement#: Yes + +You are trying to determine if there exists some non-factual and incorrect information in the summary. +#Document#: The city was brought to a standstill on 15 December last year when a gunman held 18 hostages for 17 hours. Family members of victims Tori Johnson and Katrina Dawson were in attendance. Images of the floral tributes that filled the city centre in the wake of the siege were projected on to the cafe and surrounding buildings in an emotional twilight ceremony. Prime Minister Malcolm Turnbull gave an address saying a "whole nation resolved to answer hatred with love". "Testament to the spirit of Australians is that with such unnecessary, thoughtless tragedy, an amazing birth of mateship, unity and love occurs. Proud to be Australian," he said. How the Sydney siege unfolded New South Wales Premier Mike Baird has also announced plans for a permanent memorial to be built into the pavement in Martin Place. Clear cubes containing flowers will be embedded into the concrete and will shine with specialised lighting. 
It is a project inspired by the massive floral tributes that were left in the days after the siege. "Something remarkable happened here. As a city we were drawn to Martin Place. We came in shock and in sorrow but every step we took was with purpose," he said on Tuesday. +#Summary#: Crowds have gathered in Sydney's Martin Place to honour the victims of the Lindt cafe siege, one year on. +#Your Judgement#: No + +You are trying to determine if there is a factual contradiction between the summary and the document. +#Document#: Christopher Huxtable, 34, from Swansea, had been missing since the collapse in February. His body was found on Wednesday and workers who carried out the search formed a guard of honour as it was driven from the site in the early hours of the morning. Ken Cresswell, 57, and John Shaw, 61, both from Rotherham, remain missing. The body of a fourth man, Michael Collings, 53, from Brotton, Teesside, was previously recovered from the site. Swansea East MP Carolyn Harris, who has been involved with the family since the incident, said they still did not know all the facts about the collapse. She said: "I feel very sad. My heart and my prayers go out to the family who have waited desperately for Christopher's body to be found. They can finally have closure, and say goodbye to him and grieve his loss. "But let's not forget that there's two other families who are still waiting for their loved ones to be returned." The building was due for demolition when it partially collapsed in February. +#Summary#: The body of a man whose body was found at the site of the Swansea Bay Power Station collapse has been removed from the site. +#Your Judgement#: Yes + +You should try your best to determine if the summary contains non-factual or hallucinated information according to the above hallucination types. 
The answer you give MUST be \"Yes\" or \"No\"".""" + + +def doc_to_text_qa(doc: dict[str, str]) -> str: + # prompt = instruction + "\n\n#Question#: " + question + "\n#Answer#: " + answer + "\n#Your Judgement#:" + doc_text = QA_INSTURCTIONS + "\n\n#Knowledge#: " + doc["knowledge"] + "\n#Question#: " + doc["question"] + "\n#Answer#: " + doc["answer"] + "\n#Your Judgement#:" + return doc_text + + +def doc_to_text_dialogue(doc: dict[str, str]) -> str: + # prompt = instruction + "\n\n#Dialogue History#: " + dialog + "\n#Response#: " + response + "\n#Your Judgement#:" + doc_text = DIALOGUE_INSTRUCTIONS + "\n\n#Knowledge#: " + doc["knowledge"] + "\n#Dialogue History#: " + doc["dialogue_history"] + "\n#Response#: " + doc["response"] + "\n#Your Judgement#:" + return doc_text + + +def doc_to_text_summarization(doc: dict[str, str]) -> str: + # prompt1 = instruction + "\n\n#Document#: " + document + # prompt2 = "\n#Summary#: " + summary + "\n#Your Judgement#:" + doc_text_1 = SUMMARIZATION_INSTRUCTIONS + "\n\n#Document#: " + doc["document"] + doc_text_2 = "\n#Summary#: " + doc["summary"] + "\n#Your Judgement#:" + doc_text = doc_text_1 + doc_text_2 + return doc_text + + +def doc_to_target(doc: dict[str, str]) -> str: + return doc['hallucination'] + + +def compute_metrics(gold_answer: str, prediction: str) -> dict[str, float]: + is_correct = True + + if ("Yes" in prediction and "No" in prediction) or ("Yes" not in prediction and "No" not in prediction): + is_correct = False + elif "Yes" in prediction: + prediction = "yes" + elif "No" in prediction: + prediction = "no" + + is_exact = gold_answer == prediction + + res = {"correctness": 1.0 if is_correct else 0.0} + if is_correct: + res["em"] = 1.0 if is_exact else 0.0 + + res["acc"] = 1.0 if (is_correct and is_exact) else 0.0 + + return res + + +def process_results(doc: dict[str, str], results: list[str]) -> dict[str, float]: + # results is e.g., ['Yes'] + gold_list = doc_to_target(doc) + # gold_list is e.g., 'yes' + prediction = results[0].strip().split("\n")[0] + scores = compute_metrics(gold_list, prediction) + return scores diff --git a/src/backend/tasks/memo-trap/memo-trap.yaml b/src/backend/tasks/memo-trap/memo-trap.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7a24dd459f602d28edc66f3054308dbd378919d0 --- /dev/null +++ b/src/backend/tasks/memo-trap/memo-trap.yaml @@ -0,0 +1,19 @@ +task: memo-trap +dataset_path: pminervini/inverse-scaling +dataset_name: memo-trap +output_type: multiple_choice +training_split: null +validation_split: data +test_split: null +num_fewshot: 0 +doc_to_text: "{{prompt}}" +doc_to_target: answer_index +doc_to_choice: "{{classes}}" +should_decontaminate: False +doc_to_decontamination_query: prompt +metric_list: + - metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 \ No newline at end of file diff --git a/src/backend/tasks/memo-trap/memo-trap_v2.yaml b/src/backend/tasks/memo-trap/memo-trap_v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b2c608366e5077260d7de8c3c59e94bbab428ecc --- /dev/null +++ b/src/backend/tasks/memo-trap/memo-trap_v2.yaml @@ -0,0 +1,20 @@ +task: memo-trap_v2 +dataset_path: pminervini/inverse-scaling +dataset_name: memo-trap +output_type: multiple_choice +training_split: null +validation_split: data +test_split: null +# num_fewshot: 0 +doc_to_text: "{{prompt}}" +doc_to_target: answer_index +doc_to_choice: "{{classes}}" +target_delimiter: "" +should_decontaminate: False +doc_to_decontamination_query: prompt +metric_list: + 
- metric: acc + aggregation: mean + higher_is_better: true +metadata: + version: 0.0 \ No newline at end of file diff --git a/src/backend/tasks/nq8/README.md b/src/backend/tasks/nq8/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/src/backend/tasks/nq8/nq8.yaml b/src/backend/tasks/nq8/nq8.yaml new file mode 100644 index 0000000000000000000000000000000000000000..dbe5df2e98b56a454d859b60209933e3f91a530b --- /dev/null +++ b/src/backend/tasks/nq8/nq8.yaml @@ -0,0 +1,32 @@ +task: nq8 +dataset_path: nq_open +output_type: generate_until +training_split: train +validation_split: validation +description: "Answer these questions:\n\n" +doc_to_text: "Q: {{question}}?\nA:" +doc_to_target: "{{answer}}" # TODO: should be multi-target +fewshot_delimiter: "\n" +generation_kwargs: + until: + - "\n" + - "." + - "," + do_sample: false + temperature: 0.0 +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + regexes_to_ignore: + - "\\b(?:The |the |An |A |a |an )" +metadata: + version: 0.0 diff --git a/src/backend/tasks/nq_swap/nq_swap.yaml b/src/backend/tasks/nq_swap/nq_swap.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0b61dd1af6b18a03ee7c93238c2c555ce27ffc77 --- /dev/null +++ b/src/backend/tasks/nq_swap/nq_swap.yaml @@ -0,0 +1,31 @@ +task: nq_swap +dataset_path: pminervini/NQ-Swap +output_type: generate_until +validation_split: substituted +description: "Answer the following question based on the provided context:\n\n" +doc_to_text: "Context: {{context}}\nQuestion: {{question}}?\nAnswer:" +doc_to_target: "{{answer}}" # TODO: should be multi-target +fewshot_delimiter: "\n\n" +generation_kwargs: + until: + - "\n" + - "." + - "," + do_sample: false + temperature: 0.0 +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + regexes_to_ignore: + - "\\b(?:The |the |An |A |a |an )" +metadata: + version: 0.0 \ No newline at end of file
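As a reading aid, the `exact_match` post-processing that the two configs above request (ignore case, ignore punctuation, strip leading articles) can be sketched as follows; this is an illustrative re-implementation, not the Harness code:

```python
import re
import string

# Strip leading articles (the pattern from the configs above), then compare
# case- and punctuation-insensitively against every gold alias.
ARTICLES = re.compile(r"\b(?:The |the |An |A |a |an )")

def normalise(text: str) -> str:
    text = ARTICLES.sub("", text)
    text = text.lower()
    text = text.translate(str.maketrans("", "", string.punctuation))
    return " ".join(text.split())

def exact_match(prediction: str, golds: list[str]) -> float:
    return float(any(normalise(prediction) == normalise(g) for g in golds))

assert exact_match("The Eiffel Tower.", ["Eiffel Tower"]) == 1.0
```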
diff --git a/src/backend/tasks/selfcheckgpt/README.md b/src/backend/tasks/selfcheckgpt/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7813db696437c30c378705e32c3c32c7342876c1 --- /dev/null +++ b/src/backend/tasks/selfcheckgpt/README.md @@ -0,0 +1,94 @@ +# Task-name + +### Paper + +Title: `SelfCheckGPT: Zero-Resource Black-Box Hallucination Detection for Generative Large Language Models` + +Abstract: `Generative Large Language Models (LLMs) such as GPT-3 are capable of generating highly fluent responses to a wide variety of user prompts. However, LLMs are known to hallucinate facts and make non-factual statements which can undermine trust in their output. Existing fact-checking approaches either require access to the output probability distribution (which may not be available for systems such as ChatGPT) or external databases that are interfaced via separate, often complex, modules. In this work, we propose "SelfCheckGPT", a simple sampling-based approach that can be used to fact-check the responses of black-box models in a zero-resource fashion, i.e. without an external database. SelfCheckGPT leverages the simple idea that if an LLM has knowledge of a given concept, sampled responses are likely to be similar and contain consistent facts. However, for hallucinated facts, stochastically sampled responses are likely to diverge and contradict one another. We investigate this approach by using GPT-3 to generate passages about individuals from the WikiBio dataset, and manually annotate the factuality of the generated passages. We demonstrate that SelfCheckGPT can: i) detect non-factual and factual sentences; and ii) rank passages in terms of factuality. We compare our approach to several baselines and show that our approach has considerably higher AUC-PR scores in sentence-level hallucination detection and higher correlation scores in passage-level factuality assessment compared to grey-box methods.` + +`task.py` in this folder uses the original implementation. + +Homepage: [selfcheckgpt](https://github.com/potsawee/selfcheckgpt) + + +### Citation + +``` +@article{manakul2023selfcheckgpt, + title={Selfcheckgpt: Zero-resource black-box hallucination detection for generative large language models}, + author={Manakul, Potsawee and Liusie, Adian and Gales, Mark JF}, + journal={arXiv preprint arXiv:2303.08896}, + year={2023} +} +``` + +#### Tasks + +* `selfcheckgpt`: This task uses a generative model to generate a Wikipedia passage from a given starting topic/word. The generated passages are then measured with [selfcheckgpt](https://github.com/potsawee/selfcheckgpt). The default metric is `SelfCheckNgram`, which does not need a GPU. The other metrics, `SelfCheckBERTScore`, `SelfCheckMQAG` and `SelfCheckNLI`, are model-based scores. You can change the metric via environment variables. + +The results `avg-selfcheckgpt` and `max-selfcheckgpt` are the average and maximum sentence-level `selfcheckgpt` scores for the generated passage (with temperature=0.0). The lower the score, the less likely the passage is to be a hallucination. +``` +export SELFCHECKGPTTYPE=SelfCheckBERTScore #SelfCheckMQAG, SelfCheckNLI +``` + +Since the model-based metrics are slow on CPU, you can switch the running device to GPU with: +``` +export SELFCHECKGPTDEVICE=cuda +``` +#### Dependencies for successful running +``` +pip install spacy +pip install selfcheckgpt +python -m spacy download en +``` +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [x] Have you referenced the original paper that introduced the task? + * [x] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [x] Is the "Main" variant of this task clearly denoted? +* [x] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? \ No newline at end of file
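For orientation, the scoring in `task.py` below boils down to calls like the following (a minimal sketch using the `selfcheckgpt` package; the passage and samples here are made-up placeholders):

```python
from selfcheckgpt.modeling_selfcheck import SelfCheckNgram

# Made-up placeholder outputs: one greedy (temperature-0.0) passage and a few
# stochastic samples of the same prompt; task.py produces these via the Harness.
passage = "John Smith is a physicist. He was born in Edinburgh."
sentences = ["John Smith is a physicist.", "He was born in Edinburgh."]  # task.py splits with spaCy
samples = [
    "John Smith is a physicist from Edinburgh.",
    "John Smith is a French painter born in Lyon.",
]

scores = SelfCheckNgram(n=1).predict(sentences=sentences, passage=passage, sampled_passages=samples)
# Document-level negative log-probabilities of the passage under an n-gram model
# of the samples; lower means more consistent, i.e. less likely hallucinated.
print(scores["doc_level"]["avg_neg_logprob"], scores["doc_level"]["avg_max_neg_logprob"])
```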
diff --git a/src/backend/tasks/selfcheckgpt/selfcheckgpt.yaml b/src/backend/tasks/selfcheckgpt/selfcheckgpt.yaml new file mode 100644 index 0000000000000000000000000000000000000000..159ec3947a0920902e4eab31a24e3ecd247ad940 --- /dev/null +++ b/src/backend/tasks/selfcheckgpt/selfcheckgpt.yaml @@ -0,0 +1,2 @@ +task: selfcheckgpt +class: !function task.SelfCheckGPT diff --git a/src/backend/tasks/selfcheckgpt/task.py b/src/backend/tasks/selfcheckgpt/task.py new file mode 100644 index 0000000000000000000000000000000000000000..30b3de66233dddde2203886d136ce9eb4a4381bb --- /dev/null +++ b/src/backend/tasks/selfcheckgpt/task.py @@ -0,0 +1,148 @@ +import os +from typing import Union, List + +from lm_eval.api.task import ConfigurableTask +from lm_eval.api.instance import Instance +# from lm_eval.api.registry import register_task +from lm_eval.api.metrics import mean + +from src.backend.envs import DEVICE + +import spacy +from selfcheckgpt.modeling_selfcheck import SelfCheckMQAG, SelfCheckNLI, SelfCheckBERTScore, SelfCheckNgram + + +# @register_task("selfcheckgpt") +class SelfCheckGPT(ConfigurableTask): + VERSION = 0.0 + DATASET_PATH = "potsawee/wiki_bio_gpt3_hallucination" + DATASET_NAME = None + OUTPUT_TYPE = 'generate_until' + + def __init__(self): + super().__init__(config={'metadata': {'version': self.VERSION}}) + # these end tokens are hard-coded because of a current limitation of llm-eval.
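+ # generation_kwargs drives the greedy (temperature-0.0) passage under test; generation_kwargs_sampling below drives the stochastic samples used as evidence for the consistency check.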
+ self.generation_kwargs = {"until": ["\n\n", "<unk>", "<|im_end|>", "</s>", "<|endoftext|>"], "max_length": 512} + self.generation_kwargs_sampling_number = 5 # the number of samples to draw for the self-consistency check + self.generation_kwargs_sampling = {"temperature": 0.99, "do_sample": True, "until": ["\n\n", "<unk>", "<|im_end|>", "</s>"], "max_length": 512} + + self.selfcheckgpt_type = os.environ.get('SELFCHECKGPTTYPE', 'SelfCheckNLI') + self.selfcheckgpt_device = os.environ.get('SELFCHECKGPTDEVICE', DEVICE) + self.selfcheckgpt_nlp = spacy.load("en_core_web_sm") + + if self.selfcheckgpt_type == 'SelfCheckNgram': + self.selfcheckgpt = SelfCheckNgram(n=1) + elif self.selfcheckgpt_type == 'SelfCheckBERTScore': + self.selfcheckgpt = SelfCheckBERTScore(rescale_with_baseline=True) + elif self.selfcheckgpt_type == 'SelfCheckMQAG': + self.selfcheckgpt = SelfCheckMQAG(device=self.selfcheckgpt_device) + elif self.selfcheckgpt_type == 'SelfCheckNLI': + self.selfcheckgpt = SelfCheckNLI(device=self.selfcheckgpt_device) + self.SelfCheckNLI_error_cnt = 0 + + def has_training_docs(self): + return False + + def has_validation_docs(self): + return True + + def has_test_docs(self): + return False + + def validation_docs(self): + return self.dataset["evaluation"] + + def doc_to_text(self, doc): + if not hasattr(self, 'selfcheckgpt_nlp'): + self.selfcheckgpt_nlp = spacy.load("en_core_web_sm") + + sentences = [x.text.strip() for x in self.selfcheckgpt_nlp(doc['wiki_bio_text']).sents] + if len(sentences) < 2: + raise ValueError(f"This Wikipedia passage is too short for the self-consistency check: {sentences}") + # discussed with Potsawee + + doc_text = f"Please generate a Wikipedia passage that consists of at least two sentences, starting with the following sentence: {sentences[0]}\n" + return doc_text + + def doc_to_target(self, doc): + answer = doc['wiki_bio_text'] + return answer + + def construct_requests(self, doc: dict, ctx: str, **kwargs) -> Union[List[Instance], Instance]: + arguments = (ctx, self.generation_kwargs) + request_list = [ + Instance(request_type='generate_until', doc=doc, arguments=arguments, idx=0, **kwargs), + ] + sampling_arguments = (ctx, self.generation_kwargs_sampling) + request_list.extend([ + Instance(request_type='generate_until', doc=doc, arguments=sampling_arguments, idx=idx, **kwargs) + for idx in range(1, self.generation_kwargs_sampling_number+1) + ] + ) + return request_list + + def process_results(self, doc, results): + response_temperature_0 = results[0] + other_responses = results[1:] + passage = self.doc_to_target(doc) + + sentences = self.selfcheckgpt_nlp(response_temperature_0) + sentences = [sent.text.strip() for sent in sentences.sents] + if self.selfcheckgpt_type == 'SelfCheckNgram': + selfcheckgpt_scores = self.selfcheckgpt.predict(sentences=sentences, passage=response_temperature_0, sampled_passages=other_responses) + return { + 'avg-selfcheckgpt': selfcheckgpt_scores['doc_level']['avg_neg_logprob'], + 'max-selfcheckgpt': selfcheckgpt_scores['doc_level']['avg_max_neg_logprob'] + } + + elif self.selfcheckgpt_type == 'SelfCheckBERTScore': + selfcheckgpt_scores = self.selfcheckgpt.predict(sentences=sentences, sampled_passages=other_responses) + elif self.selfcheckgpt_type == 'SelfCheckMQAG': + selfcheckgpt_scores = self.selfcheckgpt.predict( + sentences=sentences, + passage=response_temperature_0, + sampled_passages=other_responses, + num_questions_per_sent=5, # number of questions to be drawn + scoring_method='bayes_with_alpha', # options = 'counting', 'bayes', 'bayes_with_alpha' + beta1=0.8,
beta2=0.8) # additional params depending on scoring_method + elif self.selfcheckgpt_type == 'SelfCheckNLI': + selfcheckgpt_scores = self.selfcheckgpt.predict(sentences=sentences, sampled_passages=other_responses) + + if len(selfcheckgpt_scores) < 2: + # the check needs at least two sentences + self.SelfCheckNLI_error_cnt += 1 + result = { + 'avg-selfcheckgpt': 0.0, + 'max-selfcheckgpt': 0.0 + } + + else: + threshold = 0.7 # https://huggingface.co/blog/dhuynh95/automatic-hallucination-detection + # the passage counts as hallucinated if any single sentence is hallucinated; this is very strict. + selfcheckgpt_scores_max = 0.0 if max(selfcheckgpt_scores) > threshold else 1.0 + # the passage counts as hallucinated if the average sentence score crosses the threshold. + selfcheckgpt_scores_avg = 0.0 if sum(selfcheckgpt_scores) / len(selfcheckgpt_scores) > threshold else 1.0 + result = {'avg-selfcheckgpt': selfcheckgpt_scores_avg, 'max-selfcheckgpt': selfcheckgpt_scores_max} + + return result + + selfcheckgpt_scores_avg = sum(selfcheckgpt_scores) / len(selfcheckgpt_scores) if len(selfcheckgpt_scores) > 0 else 0 + selfcheckgpt_scores_max = max(selfcheckgpt_scores) + + return {'avg-selfcheckgpt': selfcheckgpt_scores_avg, 'max-selfcheckgpt': selfcheckgpt_scores_max} + + def aggregation(self): + """ + :returns: {str: [float] -> float} + A dictionary where keys are the names of submetrics and values are + functions that aggregate a list of metrics + """ + return {k: mean for k in ["avg-selfcheckgpt", "max-selfcheckgpt"]} + + def higher_is_better(self): + """ + :returns: {str: bool} + A dictionary where keys are the names of submetrics and values are + whether a higher value of the submetric is better + """ + return {k: True for k in ["avg-selfcheckgpt", "max-selfcheckgpt"]}
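To make the thresholding in the `SelfCheckNLI` branch of `process_results` above concrete, here is a small worked example (illustrative numbers only, assuming one contradiction-style score per sentence with higher meaning more likely hallucinated):

```python
threshold = 0.7
scores = [0.12, 0.85, 0.30]  # per-sentence NLI scores for one passage

# "max" criterion: the passage fails if ANY sentence crosses the threshold.
max_ok = 0.0 if max(scores) > threshold else 1.0                # -> 0.0 here
# "avg" criterion: the passage fails if the MEAN score crosses the threshold.
avg_ok = 0.0 if sum(scores) / len(scores) > threshold else 1.0  # mean ~0.42 -> 1.0
```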
diff --git a/src/backend/tasks/tqa8/README.md b/src/backend/tasks/tqa8/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1722b709886b938ded164ad0eee260a2e0f6b78e --- /dev/null +++ b/src/backend/tasks/tqa8/README.md @@ -0,0 +1,51 @@ +# Trivia QA + +### Paper + +Title: `TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension` +Abstract: https://arxiv.org/abs/1705.03551 + +TriviaQA is a reading comprehension dataset containing over 650K question-answer-evidence +triples. TriviaQA includes 95K question-answer pairs authored by trivia enthusiasts +and independently gathered evidence documents, six per question on average, that provide +high quality distant supervision for answering the questions. + +Homepage: https://nlp.cs.washington.edu/triviaqa/ + + +### Citation + +``` +@InProceedings{JoshiTriviaQA2017, + author = {Joshi, Mandar and Choi, Eunsol and Weld, Daniel S. and Zettlemoyer, Luke}, + title = {TriviaQA: A Large Scale Distantly Supervised Challenge Dataset for Reading Comprehension}, + booktitle = {Proceedings of the 55th Annual Meeting of the Association for Computational Linguistics}, + month = {July}, + year = {2017}, + address = {Vancouver, Canada}, + publisher = {Association for Computational Linguistics}, +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet. + +#### Tasks + +* `triviaqa`: `Generate an answer based on the question.` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/src/backend/tasks/tqa8/tqa8.yaml b/src/backend/tasks/tqa8/tqa8.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ccaffb899b8c8fbec98c76fc63c58b660c5f5709 --- /dev/null +++ b/src/backend/tasks/tqa8/tqa8.yaml @@ -0,0 +1,31 @@ +task: tqa8 +dataset_path: trivia_qa +dataset_name: rc.nocontext +output_type: generate_until +training_split: train +validation_split: validation +doc_to_text: "Question: {{question}}?\nAnswer:" +doc_to_target: "{{answer.aliases}}" +should_decontaminate: true +doc_to_decontamination_query: question +generation_kwargs: + until: + - "\n" + - "." + - "," + do_sample: false + temperature: 0.0 +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true +metadata: + version: 2.0 diff --git a/src/backend/tasks/truefalse/truefalse.yaml b/src/backend/tasks/truefalse/truefalse.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4d4aa4d92816a23693b415cc654a923e371b7d95 --- /dev/null +++ b/src/backend/tasks/truefalse/truefalse.yaml @@ -0,0 +1,13 @@ +task: truefalse_cieacf +dataset_path: pminervini/true-false +dataset_name: default +validation_split: cieacf +output_type: multiple_choice +doc_to_text: "Statement: {{statement}}\nLabel:" +doc_to_target: label +doc_to_choice: ["false", "true"] +metric_list: + - metric: acc + higher_is_better: True +metadata: + version: 0.0 diff --git a/src/backend/tasks/xsum/README.md b/src/backend/tasks/xsum/README.md new file mode 100644 index 0000000000000000000000000000000000000000..bad0c4e2d80ec17c3f4a4c2f15db2ce6a6632db4 --- /dev/null +++ b/src/backend/tasks/xsum/README.md @@ -0,0 +1,54 @@ +# XSum + +### Paper + +Title: `Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization` +Abstract: https://arxiv.org/abs/1808.10745 + +XSum (Extreme Summarization) is a dataset of BBC news articles, each paired with a +single-sentence summary, used to evaluate abstractive summarisation with language +models: a system has to compress an entire article into one sentence rather than +extract spans from it, which makes the task a common testbed for faithfulness and +hallucination in summarisation.
+ +Homepage: https://github.com/EdinburghNLP/XSum + + +### Citation + +``` +@misc{narayan2018dont, + title={Don't Give Me the Details, Just the Summary! Topic-Aware Convolutional Neural Networks for Extreme Summarization}, + author={Shashi Narayan and Shay B. Cohen and Mirella Lapata}, + year={2018}, + eprint={1808.10745}, + archivePrefix={arXiv}, + primaryClass={cs.CL} +} +``` + +### Groups and Tasks + +#### Groups + +* Not part of a group yet + +#### Tasks + +* `xsum`: `Generate a one-sentence summary of a BBC news article.` + +### Checklist + +For adding novel benchmarks/datasets to the library: +* [ ] Is the task an existing benchmark in the literature? + * [ ] Have you referenced the original paper that introduced the task? + * [ ] If yes, does the original paper provide a reference implementation? If so, have you checked against the reference implementation and documented how to run such a test? + + +If other tasks on this dataset are already supported: +* [ ] Is the "Main" variant of this task clearly denoted? +* [ ] Have you provided a short sentence in a README on what each new variant adds / evaluates? +* [ ] Have you noted which, if any, published evaluation setups are matched by this variant? diff --git a/src/backend/tasks/xsum/task.py b/src/backend/tasks/xsum/task.py new file mode 100644 index 0000000000000000000000000000000000000000..db9d55500b8ec9cac9c07a44eb16b36d623fff62 --- /dev/null +++ b/src/backend/tasks/xsum/task.py @@ -0,0 +1,188 @@ +from lm_eval.api.task import ConfigurableTask +from lm_eval.api.instance import Instance +# from lm_eval.api.registry import register_task +from lm_eval.api.metrics import mean + +import torch +import sacrebleu +from rouge_score import rouge_scorer, scoring + + +def bleu(refs, preds): + """ + Returns `t5` style BLEU scores. See the related implementation: + https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L41 + + :param refs: + A `list` of `list` of reference `str`s. + :param preds: + A `list` of predicted `str`s. + """ + score = sacrebleu.corpus_bleu(preds, refs, smooth_method="exp", smooth_value=0.0, force=False, + lowercase=False, tokenize="intl", use_effective_order=False).score + return score + + +def rouge(refs, preds): + """ + Returns `t5` style ROUGE scores. See the related implementation: + https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L68 + + :param refs: + A `list` of reference `str`s. + :param preds: + A `list` of predicted `str`s. + """ + rouge_types = ["rouge1", "rouge2", "rougeLsum"] + scorer = rouge_scorer.RougeScorer(rouge_types) + # Add newlines between sentences to correctly compute `rougeLsum`. + + def _prepare_summary(summary): + summary = summary.replace(" . ", ".\n") + return summary + + # Accumulate confidence intervals.
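+ # aggregate() returns low/mid/high bootstrap estimates for each ROUGE type; the mid (point) estimate of the F-measure is what gets reported below.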
+ aggregator = scoring.BootstrapAggregator() + for ref, pred in zip(refs, preds): + ref = _prepare_summary(ref) + pred = _prepare_summary(pred) + aggregator.add_scores(scorer.score(ref, pred)) + result = aggregator.aggregate() + return {type: result[type].mid.fmeasure * 100 for type in rouge_types} + + +# @register_task("xsum") +class XSum(ConfigurableTask): + VERSION = 0 + DATASET_PATH = "EdinburghNLP/xsum" + DATASET_NAME = None + + def __init__(self): + super().__init__(config={'metadata': {'version': self.VERSION}}) + self.factkb_tokenizer = None + self.factkb_model = None + self.bert_score = None + + def maybe_init_factkb(self): + if self.factkb_tokenizer is None or self.factkb_model is None: + from transformers import AutoTokenizer, AutoModelForSequenceClassification + self.factkb_tokenizer = AutoTokenizer.from_pretrained("roberta-base", padding="max_length", truncation=True) + self.factkb_model = AutoModelForSequenceClassification.from_pretrained("bunsenfeng/FactKB", num_labels=2, device_map="auto") + + def maybe_init_bertscore(self): + if self.bert_score is None: + from evaluate import load + self.bert_score = load("bertscore") + + def has_training_docs(self): + return True + + def has_validation_docs(self): + return True + + def has_test_docs(self): + return True + + def training_docs(self): + return self.dataset["train"] + + def validation_docs(self): + return self.dataset["validation"] + + def test_docs(self): + return self.dataset["test"] + + def doc_to_text(self, doc): + return f'Document: {doc["document"]}\nSummary:' + + @staticmethod + def should_decontaminate(): + return True + + def doc_to_decontamination_query(self, doc): + return doc["document"] + + def doc_to_target(self, doc): + return doc["summary"] + + def construct_requests(self, doc, ctx, **kwargs): + """Uses RequestFactory to construct Requests and returns an iterable of + Requests which will be sent to the LM. + + :param doc: + The document as returned from training_docs, validation_docs, or test_docs. + :param ctx: str + The context string, generated by fewshot_context. This includes the natural + language description, as well as the few shot examples, and the question + part of the document for `doc`. 
+ """ + + return [ + Instance( + request_type="generate_until", + doc=doc, + # arguments=(ctx, {"until": ["\n", "."]}), + arguments=(ctx, {"until": ["\n"]}), + idx=0, + **kwargs + ) + ] + + def process_results(self, doc, results): + completion = results[0] + + # breakpoint() + + document = doc["document"] + gold_summary = doc["summary"] + + true_refs = [doc["summary"]] + all_refs = true_refs + + # ROUGE-N + rouge_scores = [rouge([ref], [completion]) for ref in all_refs] + # ROUGE-1 + rouge1_scores = [score["rouge1"] for score in rouge_scores] + # ROUGE-2 + rouge2_scores = [score["rouge2"] for score in rouge_scores] + # ROUGE-L + rougeL_scores = [score["rougeLsum"] for score in rouge_scores] + + self.maybe_init_factkb() + input_factkb = [[completion, document]] + factkb_tokens = self.factkb_tokenizer(input_factkb, return_tensors="pt", padding="max_length", truncation=True).to(self.factkb_model.device) + factkb_logits = self.factkb_model(**factkb_tokens).logits + factkb_res = torch.softmax(factkb_logits, dim=1) + + self.maybe_init_bertscore() + bert_score_res = self.bert_score.compute(predictions=[completion], references=[gold_summary], model_type="microsoft/deberta-xlarge-mnli", lang="en") + + res = { + "rouge1": rouge1_scores[0], + "rouge2": rouge2_scores[0], + "rougeL": rougeL_scores[0], + "factKB": float(factkb_res[0][1]), + "bertscore_precision": float(bert_score_res["precision"][0]), + "bertscore_recall": float(bert_score_res["recall"][0]), + "bertscore_f1": float(bert_score_res["f1"][0]), + } + + # breakpoint() + + return res + + def aggregation(self): + """ + :returns: {str: [float] -> float} + A dictionary where keys are the names of submetrics and values are + functions that aggregate a list of metrics + """ + return {k: mean for k in ["rouge1", "rouge2", "rougeL", "factKB", "bertscore_precision", "bertscore_recall", "bertscore_f1"]} + + def higher_is_better(self): + """ + :returns: {str: bool} + A dictionary where keys are the names of submetrics and values are + whether a higher value of the submetric is better + """ + return {k: True for k in ["rouge1", "rouge2", "rougeL", "factKB", "bertscore_precision", "bertscore_recall", "bertscore_f1"]} diff --git a/src/backend/tasks/xsum/task_v2.py b/src/backend/tasks/xsum/task_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..405f4a8559bdfb8e1584e7f6e444b1bc1d51c0b3 --- /dev/null +++ b/src/backend/tasks/xsum/task_v2.py @@ -0,0 +1,193 @@ +from lm_eval.api.task import ConfigurableTask +from lm_eval.api.instance import Instance +# from lm_eval.api.registry import register_task +from lm_eval.api.metrics import mean + +import torch +import sacrebleu +from rouge_score import rouge_scorer, scoring + + +def bleu(refs, preds): + """ + Returns `t5` style BLEU scores. See the related implementation: + https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L41 + + :param refs: + A `list` of `list` of reference `str`s. + :param preds: + A `list` of predicted `str`s. + """ + score = sacrebleu.corpus_bleu(preds, refs, smooth_method="exp", smooth_value=0.0, force=False, + lowercase=False, tokenize="intl", use_effective_order=False).score + return score + + +def rouge(refs, preds): + """ + Returns `t5` style ROUGE scores. See the related implementation: + https://github.com/google-research/text-to-text-transfer-transformer/blob/3d10afd51ba97ac29eb66ae701eca274488202f7/t5/evaluation/metrics.py#L68 + + :param refs: + A `list` of reference `strs`. 
+ :param preds: + A `list` of predicted `strs`. + """ + rouge_types = ["rouge1", "rouge2", "rougeLsum"] + scorer = rouge_scorer.RougeScorer(rouge_types) + # Add newlines between sentences to correctly compute `rougeLsum`. + + def _prepare_summary(summary): + summary = summary.replace(" . ", ".\n") + return summary + + # Accumulate confidence intervals. + aggregator = scoring.BootstrapAggregator() + for ref, pred in zip(refs, preds): + ref = _prepare_summary(ref) + pred = _prepare_summary(pred) + aggregator.add_scores(scorer.score(ref, pred)) + result = aggregator.aggregate() + return {type: result[type].mid.fmeasure * 100 for type in rouge_types} + + +# @register_task("xsum_v2") +class XSumv2(ConfigurableTask): + VERSION = 2 + DATASET_PATH = "EdinburghNLP/xsum" + DATASET_NAME = None + + def __init__(self): + # breakpoint() + super().__init__(config={'metadata': {'version': self.VERSION}, + 'generation_kwargs': {'do_sample': False, 'temperature': 0.0, 'until': ['\n', '\n\n']}}) + self.factkb_tokenizer = None + self.factkb_model = None + self.bert_score = None + + def maybe_init_factkb(self): + if self.factkb_tokenizer is None or self.factkb_model is None: + from transformers import AutoTokenizer, AutoModelForSequenceClassification + self.factkb_tokenizer = AutoTokenizer.from_pretrained("roberta-base", padding="max_length", truncation=True) + self.factkb_model = AutoModelForSequenceClassification.from_pretrained("bunsenfeng/FactKB", num_labels=2, device_map="auto") + + def maybe_init_bertscore(self): + if self.bert_score is None: + from evaluate import load + self.bert_score = load("bertscore") + + def has_training_docs(self): + return True + + def has_validation_docs(self): + return True + + def has_test_docs(self): + return True + + def training_docs(self): + return self.dataset["train"] + + def validation_docs(self): + return self.dataset["validation"] + + def test_docs(self): + return self.dataset["test"] + + # def fewshot_delimiter(self): + # return "\n\n" + + # From https://arxiv.org/abs/2305.14739 + def doc_to_text(self, doc): + return f'Article: {doc["document"]}\nSummarize the article in one sentence. Summary:' + + def should_decontaminate(self): + return True + + def doc_to_decontamination_query(self, doc): + return doc["document"] + + def doc_to_target(self, doc): + return doc["summary"] + + def construct_requests(self, doc, ctx, **kwargs): + """Uses RequestFactory to construct Requests and returns an iterable of + Requests which will be sent to the LM. + + :param doc: + The document as returned from training_docs, validation_docs, or test_docs. + :param ctx: str + The context string, generated by fewshot_context. This includes the natural + language description, as well as the few shot examples, and the question + part of the document for `doc`. 
+ """ + + return [ + Instance( + request_type="generate_until", + doc=doc, + # arguments=(ctx, {"until": ["\n", "."]}), + arguments=(ctx, {"until": ["\n"]}), + idx=0, + **kwargs + ) + ] + + def process_results(self, doc, results): + completion = results[0] + + # breakpoint() + + document = doc["document"] + gold_summary = doc["summary"] + + true_refs = [doc["summary"]] + all_refs = true_refs + + # ROUGE-N + rouge_scores = [rouge([ref], [completion]) for ref in all_refs] + # ROUGE-1 + rouge1_scores = [score["rouge1"] for score in rouge_scores] + # ROUGE-2 + rouge2_scores = [score["rouge2"] for score in rouge_scores] + # ROUGE-L + rougeL_scores = [score["rougeLsum"] for score in rouge_scores] + + self.maybe_init_factkb() + input_factkb = [[completion, document]] + factkb_tokens = self.factkb_tokenizer(input_factkb, return_tensors="pt", padding="max_length", truncation=True).to(self.factkb_model.device) + factkb_logits = self.factkb_model(**factkb_tokens).logits + factkb_res = torch.softmax(factkb_logits, dim=1) + + self.maybe_init_bertscore() + bert_score_res = self.bert_score.compute(predictions=[completion], references=[gold_summary], model_type="microsoft/deberta-xlarge-mnli", lang="en") + + res = { + "rouge1": rouge1_scores[0], + "rouge2": rouge2_scores[0], + "rougeL": rougeL_scores[0], + "factKB": float(factkb_res[0][1]), + "bertscore_precision": float(bert_score_res["precision"][0]), + "bertscore_recall": float(bert_score_res["recall"][0]), + "bertscore_f1": float(bert_score_res["f1"][0]), + } + + # breakpoint() + + return res + + def aggregation(self): + """ + :returns: {str: [float] -> float} + A dictionary where keys are the names of submetrics and values are + functions that aggregate a list of metrics + """ + return {k: mean for k in ["rouge1", "rouge2", "rougeL", "factKB", "bertscore_precision", "bertscore_recall", "bertscore_f1"]} + + def higher_is_better(self): + """ + :returns: {str: bool} + A dictionary where keys are the names of submetrics and values are + whether a higher value of the submetric is better + """ + return {k: True for k in ["rouge1", "rouge2", "rougeL", "factKB", "bertscore_precision", "bertscore_recall", "bertscore_f1"]} diff --git a/src/backend/tasks/xsum/xsum.yaml b/src/backend/tasks/xsum/xsum.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d50ec2a93e8297d614f3312a88eeee8a4f78021f --- /dev/null +++ b/src/backend/tasks/xsum/xsum.yaml @@ -0,0 +1,2 @@ +task: xsum +class: !function task.XSum diff --git a/src/backend/tasks/xsum/xsum_v2.yaml b/src/backend/tasks/xsum/xsum_v2.yaml new file mode 100644 index 0000000000000000000000000000000000000000..01f225f33be9f471262dc22ae98123bfc6ecde9a --- /dev/null +++ b/src/backend/tasks/xsum/xsum_v2.yaml @@ -0,0 +1,2 @@ +task: xsum_v2 +class: !function task_v2.XSumv2 diff --git a/src/browse.py b/src/browse.py new file mode 100755 index 0000000000000000000000000000000000000000..b125e805a9ca0de4a398acf87a1df72c361387b9 --- /dev/null +++ b/src/browse.py @@ -0,0 +1,231 @@ +#!/usr/bin/env python3 +# +# Copyright 2001 Google Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Simple web server for browsing dependency graph data. + +This script is inlined into the final executable and spawned by +it when needed. +""" + +try: + import http.server as httpserver + import socketserver +except ImportError: + import BaseHTTPServer as httpserver + import SocketServer as socketserver +import argparse +import os +import socket +import subprocess +import sys +import webbrowser +if sys.version_info >= (3, 2): + from html import escape +else: + from cgi import escape +try: + from urllib.request import unquote +except ImportError: + from urllib2 import unquote +from collections import namedtuple + +Node = namedtuple('Node', ['inputs', 'rule', 'target', 'outputs']) + +# Ideally we'd allow you to navigate to a build edge or a build node, +# with appropriate views for each. But there's no way to *name* a build +# edge so we can only display nodes. +# +# For a given node, it has at most one input edge, which has n +# different inputs. This becomes node.inputs. (We leave out the +# outputs of the input edge due to what follows.) The node can have +# multiple dependent output edges. Rather than attempting to display +# those, they are summarized by taking the union of all their outputs. +# +# This means there's no single view that shows you all inputs and outputs +# of an edge. But I think it's less confusing than alternatives. + +def match_strip(line, prefix): + if not line.startswith(prefix): + return (False, line) + return (True, line[len(prefix):]) + +def html_escape(text): + return escape(text, quote=True) + +def parse(text): + lines = iter(text.split('\n')) + + target = None + rule = None + inputs = [] + outputs = [] + + try: + target = next(lines)[:-1] # strip trailing colon + + line = next(lines) + (match, rule) = match_strip(line, ' input: ') + if match: + (match, line) = match_strip(next(lines), ' ') + while match: + type = None + (match, line) = match_strip(line, '| ') + if match: + type = 'implicit' + (match, line) = match_strip(line, '|| ') + if match: + type = 'order-only' + inputs.append((line, type)) + (match, line) = match_strip(next(lines), ' ') + + match, _ = match_strip(line, ' outputs:') + if match: + (match, line) = match_strip(next(lines), ' ') + while match: + outputs.append(line) + (match, line) = match_strip(next(lines), ' ') + except StopIteration: + pass + + return Node(inputs, rule, target, outputs) + +def create_page(body): + return '''<!DOCTYPE html> +<style> +body { + font-family: sans; + font-size: 0.8em; + margin: 4ex; +} +h1 { + font-weight: normal; + font-size: 140%; + text-align: center; + margin: 0; +} +h2 { + font-weight: normal; + font-size: 120%; +} +tt { + font-family: WebKitWorkaround, monospace; + white-space: nowrap; +} +.filelist { + -webkit-columns: auto 2; +} +</style> +''' + body + +def generate_html(node): + document = ['<h1><tt>%s</tt></h1>' % html_escape(node.target)] + + if node.inputs: + document.append('<h2>target is built using rule <tt>%s</tt> of</h2>' % + html_escape(node.rule)) + if len(node.inputs) > 0: + document.append('<div class=filelist>') + for input, type in sorted(node.inputs): + extra = '' + if type: + extra = ' (%s)' % html_escape(type) + document.append('<tt><a href="?%s">%s</a>%s</tt><br>' % + (html_escape(input), html_escape(input), extra)) + document.append('</div>') + + if node.outputs: + document.append('<h2>dependent edges build:</h2>') + document.append('<div class=filelist>') + for output in sorted(node.outputs): + document.append('<tt><a href="?%s">%s</a></tt><br>' % + (html_escape(output), html_escape(output))) + document.append('</div>') + + return '\n'.join(document) + +def ninja_dump(target): + cmd = [args.ninja_command, '-f', args.f, '-t', 'query', target] + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, + universal_newlines=True) + return proc.communicate() + (proc.returncode,) + +class RequestHandler(httpserver.BaseHTTPRequestHandler): + def do_GET(self): + assert self.path[0] == '/' + target = unquote(self.path[1:]) + + if target == '': + self.send_response(302) + self.send_header('Location', '?' + args.initial_target) + self.end_headers() + return + + if not target.startswith('?'): + self.send_response(404) + self.end_headers() + return + target = target[1:] + + ninja_output, ninja_error, exit_code = ninja_dump(target) + if exit_code == 0: + page_body = generate_html(parse(ninja_output.strip())) + else: + # Relay ninja's error message. + page_body = '<h1><tt>%s</tt></h1>' % html_escape(ninja_error) + + self.send_response(200) + self.end_headers() + self.wfile.write(create_page(page_body).encode('utf-8')) + + def log_message(self, format, *args): + pass # Swallow console spam. + +parser = argparse.ArgumentParser(prog='ninja -t browse') +parser.add_argument('--port', '-p', default=8000, type=int, + help='Port number to use (default %(default)d)') +parser.add_argument('--hostname', '-a', default='localhost', type=str, + help='Hostname to bind to (default %(default)s)') +parser.add_argument('--no-browser', action='store_true', + help='Do not open a webbrowser on startup.') + +parser.add_argument('--ninja-command', default='ninja', + help='Path to ninja binary (default %(default)s)') +parser.add_argument('-f', default='build.ninja', + help='Path to build.ninja file (default %(default)s)') +parser.add_argument('initial_target', default='all', nargs='?', + help='Initial target to show (default %(default)s)') + +class HTTPServer(socketserver.ThreadingMixIn, httpserver.HTTPServer): + # terminate server immediately when Python exits. + daemon_threads = True + +args = parser.parse_args() +port = args.port +hostname = args.hostname +httpd = HTTPServer((hostname,port), RequestHandler) +try: + if hostname == "": + hostname = socket.gethostname() + print('Web server running on %s:%d, ctl-C to abort...' % (hostname,port) ) + print('Web server pid %d' % os.getpid(), file=sys.stderr ) + if not args.no_browser: + webbrowser.open_new('http://%s:%s' % (hostname, port) ) + httpd.serve_forever() +except KeyboardInterrupt: + print() + pass # Swallow console spam. + + diff --git a/src/display/about.py b/src/display/about.py new file mode 100644 index 0000000000000000000000000000000000000000..05792a379b255e38b686e5dd2486520ef6c769e4 --- /dev/null +++ b/src/display/about.py @@ -0,0 +1,179 @@ +from src.display.utils import ModelType + +TITLE = """<h1 align="center" id="space-title">Hallucinations Leaderboard</h1>"""
""" + +INTRODUCTION_TEXT = """ +📐 The Hallucinations Leaderboard aims to track, rank and evaluate hallucinations in LLMs. + +It evaluates the propensity for hallucination in Large Language Models (LLMs) across a diverse array of tasks, including Closed-book Open-domain QA, Summarization, Reading Comprehension, Instruction Following, Fact-Checking, Hallucination Detection, and Self-Consistency. The evaluation encompasses a wide range of datasets such as NQ Open, TriviaQA, TruthfulQA, XSum, CNN/DM, RACE, SQuADv2, MemoTrap, IFEval, FEVER, FaithDial, True-False, HaluEval, and SelfCheckGPT, offering a comprehensive assessment of each model's performance in generating accurate and contextually relevant content. + +A more detailed explanation of the definition of hallucination and the leaderboard's motivation, tasks and dataset can be found on the "About" page and [The Hallucinations Leaderboard blog post](https://huggingface.co/blog/leaderboards-on-the-hub-hallucinations). + +Submit a model for automated evaluation on the [Edinburgh International Data Facility](https://www.epcc.ed.ac.uk/hpc-services/edinburgh-international-data-facility) (EIDF) GPU cluster on the "Submit" page. +The backend of the Hallucinations leaderboard is based on the [Eleuther AI Language Model Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness) --- more details in the "About" page. +Metrics and datasets used by the Hallucinations Leaderboard were identified while writing our [awesome-hallucinations-detection](https://github.com/EdinburghNLP/awesome-hallucination-detection) page (you are encouraged to contribute to this list via pull requests). +If you have comments or suggestions on datasets and metrics, please [reach out to us in our discussion forum](https://huggingface.co/spaces/hallucinations-leaderboard/leaderboard/discussions). 
+ +The Hallucination Leaderboard includes a variety of tasks identified while working on the [awesome-hallucination-detection](https://github.com/EdinburghNLP/awesome-hallucination-detection) repository: +- **Closed-book Open-domain QA** -- [NQ Open](https://huggingface.co/datasets/nq_open) (8-shot and 64-shot), [TriviaQA](https://huggingface.co/datasets/trivia_qa) (8-shot and 64-shot), [TruthfulQA](https://huggingface.co/datasets/truthful_qa) ([MC1](https://huggingface.co/datasets/truthful_qa/viewer/multiple_choice), [MC2](https://huggingface.co/datasets/truthful_qa/viewer/multiple_choice), and [Generative](https://huggingface.co/datasets/truthful_qa/viewer/generation)) +- **Summarisation** -- [XSum](https://huggingface.co/datasets/EdinburghNLP/xsum), [CNN/DM](https://huggingface.co/datasets/cnn_dailymail) +- **Reading Comprehension** -- [RACE](https://huggingface.co/datasets/EleutherAI/race) +- **Instruction Following** -- [MemoTrap](https://huggingface.co/datasets/pminervini/inverse-scaling/viewer/memo-trap), [IFEval](https://huggingface.co/datasets/wis-k/instruction-following-eval) +- **Hallucination Detection** -- [FaithDial](https://huggingface.co/datasets/McGill-NLP/FaithDial), [True-False](https://huggingface.co/datasets/pminervini/true-false), [HaluEval](https://huggingface.co/datasets/pminervini/HaluEval) ([QA](https://huggingface.co/datasets/pminervini/HaluEval/viewer/qa_samples), [Summarisation](https://huggingface.co/datasets/pminervini/HaluEval/viewer/summarization_samples), and [Dialogue](https://huggingface.co/datasets/pminervini/HaluEval/viewer/dialogue_samples)) +- **Self-Consistency** -- [SelfCheckGPT](https://huggingface.co/datasets/potsawee/wiki_bio_gpt3_hallucination) + +For more information about the leaderboard, check our [HuggingFace Blog article](https://huggingface.co/blog/leaderboards-on-the-hub-hallucinations). +""" + +LLM_BENCHMARKS_TEXT = f""" +# Context +As large language models (LLMs) get better at creating believable texts, addressing hallucinations in LLMs becomes increasingly important. In this exciting time, when numerous LLMs are released every week, it can be challenging to identify the leading model, particularly in terms of its reliability against hallucination. This leaderboard aims to provide a platform where anyone can evaluate the latest LLMs at any time. + +# How it works +📈 We evaluate the models on 19 hallucination benchmarks, spanning open-ended to closed-ended generation, using the <a href="https://github.com/EleutherAI/lm-evaluation-harness" target="_blank">Eleuther AI Language Model Evaluation Harness</a>, a unified framework to test generative language models on a large number of different evaluation tasks. +""" +LLM_BENCHMARKS_DETAILS = f""" + +### Question Answering +- NQ Open - a dataset of open domain question answering which can be answered using the contents of English Wikipedia. 64-shot setup. +- NQ Open 8 - a dataset of open domain question answering which can be answered using the contents of English Wikipedia. 8-shot setup. +- TruthfulQA MC1 - a benchmark to measure whether a language model is truthful in generating answers to questions that span 38 categories, including health, law, finance and politics. Questions are crafted so that some humans would answer falsely due to a false belief or misconception. To perform well, models must avoid generating false answers learned from imitating human texts. **MC1 denotes that there is a single correct label**.
+- TruthfulQA MC2 - a benchmark to measure whether a language model is truthful in generating answers to questions that span 38 categories, including health, law, finance and politics. Questions are crafted so that some humans would answer falsely due to a false belief or misconception. To perform well, models must avoid generating false answers learned from imitating human texts. **MC2 denotes that there can be multiple correct labels**. +- HaluEval QA - a collection of generated and human-annotated hallucinated samples for evaluating the performance of LLMs in recognising hallucinations. **QA denotes the question answering task**. +- SQuADv2 - a combination of 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but also determine when no answer is supported by the paragraph and abstain from answering. + +### Reading Comprehension +- TriviaQA - a reading comprehension dataset containing over 650K question-answer-evidence triples originating from trivia enthusiasts. 64-shot setup. +- TriviaQA 8 - a reading comprehension dataset containing over 650K question-answer-evidence triples originating from trivia enthusiasts. 8-shot setup. +- RACE - a large-scale reading comprehension dataset with more than 28,000 passages and nearly 100,000 questions. The dataset is collected from English examinations in China, which are designed for middle school and high school students. + +### Summarisation +- HaluEval Summ - a collection of generated and human-annotated hallucinated samples for evaluating the performance of LLMs in recognising hallucinations. **Summ denotes the summarisation task**. +- XSum - a dataset of BBC news articles paired with their single-sentence summaries to evaluate the output of abstractive summarization using a language model. +- CNN/DM - a dataset of CNN and Daily Mail articles paired with their summaries. + +### Dialogue +- HaluEval Dial - a collection of generated and human-annotated hallucinated samples for evaluating the performance of LLMs in recognising hallucinations. **Dial denotes the knowledge-grounded dialogue task**. +- FaithDial - a faithful knowledge-grounded dialogue benchmark, composed of 50,761 turns spanning 5649 conversations. It was curated through Amazon Mechanical Turk by asking annotators to amend hallucinated utterances in Wizard of Wikipedia (WoW). In our dialogue setting, we simulate interactions between two speakers: an information seeker and a bot wizard. The seeker has a large degree of freedom as opposed to the wizard bot which is more restricted on what it can communicate. + +### Fact Check +- MemoTrap - a dataset to investigate whether language models could fall into memorization traps. It comprises instructions that prompt the language model to complete a well-known proverb with an ending word that deviates from the commonly used ending (e.g., Write a quote that ends in the word “early”: Better late than ). +- SelfCheckGPT - a simple sampling-based approach that can be used to fact-check the responses of black-box models in a zero-resource fashion, i.e. without an external database. This task uses a generative model to generate a Wikipedia passage from a given starting topic/word; the generated passages are then measured by [selfcheckgpt](https://github.com/potsawee/selfcheckgpt).
+- FEVER - a dataset of 185,445 claims generated by altering sentences extracted from Wikipedia and subsequently verified without knowledge of the sentence they were derived from. The claims are classified as Supported, Refuted or NotEnoughInfo. For the first two classes, the annotators also recorded the sentence(s) forming the necessary evidence for their judgment. +- TrueFalse - a dataset of true and false statements. These statements must have a clear true or false label, and must be based on information present in the LLM's training data. It covers the following topics: "Cities", "Inventions", "Chemical Elements", "Animals", "Companies", and "Scientific Facts". + +### Instruction following +- IFEval - a dataset to evaluate the instruction-following ability of large language models. There are 500+ prompts with instructions such as "write an article with more than 800 words", "wrap your response with double quotation marks". + +# Details and logs +- Detailed results are in the `results` dataset: https://huggingface.co/datasets/hallucinations-leaderboard/results/tree/main +- You can find details on the inputs/outputs for the models in the `details` of each model, which you can access by clicking the 📄 emoji after the model name + +# Reproducibility +To reproduce our results, here are the commands you can run, using [this script](https://huggingface.co/spaces/hallucinations-leaderboard/leaderboard/blob/main/backend-cli.py): `python backend-cli.py`. + +Alternatively, if you're interested in evaluating a specific task with a particular model, you can use the [EleutherAI LLM Evaluation Harness library](https://github.com/EleutherAI/lm-evaluation-harness/) as follows: +`python main.py --model=hf-auto --model_args="pretrained=<your_model>,revision=<your_model_revision>,parallelize=True"` +` --tasks=<task_list> --num_fewshot=<n_few_shot> --batch_size=1 --output_path=<output_path>` + +Note that the Hallucinations Library includes several task definitions that are not included in the Harness library -- you can find them at [this link](https://huggingface.co/spaces/hallucinations-leaderboard/leaderboard/tree/main/src/backend/tasks). + +The total batch size we get for models which fit on one A100 node is 8 (8 GPUs * 1). If you don't use parallelism, adapt your batch size to fit. You can expect results to vary slightly for different batch sizes because of padding. + +The tasks and few shots parameters are: + +- NQ Open (`nq_open`): 64-shot (`exact_match`) +- NQ Open 8 (`nq8`): 8-shot (`exact_match`) +- TriviaQA (`triviaqa`): 64-shot (`exact_match`) +- TriviaQA 8 (`tqa8`): 8-shot (`exact_match`) +- TruthfulQA MC1 (`truthfulqa_mc1`): 0-shot (`acc`) +- TruthfulQA MC2 (`truthfulqa_mc2`): 0-shot (`acc`) +- HaluEval QA (`halueval_qa`): 0-shot (`em`) +- HaluEval Summ (`halueval_summarization`): 0-shot (`em`) +- HaluEval Dial (`halueval_dialogue`): 0-shot (`em`) +- XSum (`xsum`): 2-shot (`rougeLsum`) +- CNN/DM (`cnndm`): 2-shot (`rougeLsum`) +- MemoTrap (`trap`): 0-shot (`acc`) +- IFEval (`ifeval`): 0-shot (`prompt_level_strict_acc`) +- SelfCheckGPT (`selfcheckgpt`): 0-shot (-) +- FEVER (`fever10`): 16-shot (`acc`) +- SQuADv2 (`squadv2`): 4-shot (`squad_v2`) +- TrueFalse (`truefalse_cieacf`): 8-shot (`acc`) +- FaithDial (`faithdial_hallu`): 8-shot (`acc`) +- RACE (`race`): 0-shot (`acc`) + +For all these evaluations, a higher score is a better score.
+
+## Icons
+- {ModelType.PT.to_str(" : ")} model: new, base models, trained on a given corpus
+- {ModelType.FT.to_str(" : ")} model: pretrained models fine-tuned on more data
+Specific fine-tune subcategories (more adapted to chat):
+- {ModelType.chat.to_str(" : ")} model: chat models (RLHF, DPO, IFT, ...).
+- {ModelType.merges.to_str(" : ")} model: base merges and moerges.
+- {ModelType.Unknown.to_str(" : ")} model: unknown model type
+If there is no icon, we have not uploaded the information on the model yet; feel free to open an issue with the model information!
+"""
+
+FAQ_TEXT = """
+---------------------------
+# FAQ
+## 1) Submitting a model
+XXX
+## 2) Model results
+XXX
+## 3) Editing a submission
+XXX
+"""
+
+EVALUATION_QUEUE_TEXT = """
+# Evaluation Queue for the Hallucinations Leaderboard
+
+Models added here will be automatically evaluated on the EIDF cluster.
+
+## First steps before submitting a model
+
+### 1) Make sure you can load your model and tokenizer using AutoClasses:
+```python
+from transformers import AutoConfig, AutoModel, AutoTokenizer
+config = AutoConfig.from_pretrained("your model name", revision=revision)
+model = AutoModel.from_pretrained("your model name", revision=revision)
+tokenizer = AutoTokenizer.from_pretrained("your model name", revision=revision)
+```
+If this step fails, follow the error messages to debug your model before submitting it. It's likely your model has been improperly uploaded.
+
+Note: make sure your model is public!
+Note: if your model needs `trust_remote_code=True`, we do not support this option yet, but we are working on adding it; stay posted!
+
+### 2) Convert your model weights to [safetensors](https://huggingface.co/docs/safetensors/index)
+It's a new format for storing weights which is safer and faster to load and use. It will also allow us to add the number of parameters of your model to the `Extended Viewer`! A conversion sketch is included at the end of this page.
+
+### 3) Select the correct precision
+Not all models are converted properly from `float16` to `bfloat16`, and selecting the wrong precision can sometimes cause evaluation errors (as loading a `bf16` model in `fp16` can sometimes generate NaNs, depending on the weight range).
+
+## In case of model failure
+If your model is displayed in the `FAILED` category, its execution stopped.
+Make sure you have followed the above steps first.
+If everything is done, check that you can launch the EleutherAI Harness on your model locally, using the command in the About tab under "Reproducibility", with all arguments specified (you can add `--limit` to limit the number of examples per task).
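+
+As a reference for step 2) above, here is one way to convert an existing checkpoint by re-saving it with safetensors serialisation enabled (a minimal sketch; "your model name" is a placeholder, and a recent `transformers` version with `safetensors` installed is assumed):
+```python
+from transformers import AutoModel, AutoTokenizer
+
+model = AutoModel.from_pretrained("your model name")
+tokenizer = AutoTokenizer.from_pretrained("your model name")
+
+# re-serialise the weights as safetensors in a local folder
+model.save_pretrained("model-safetensors", safe_serialization=True)
+tokenizer.save_pretrained("model-safetensors")
+# then upload the folder, e.g. with model.push_to_hub(..., safe_serialization=True)
+```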
+""" + +CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results" +CITATION_BUTTON_TEXT = r""" +@misc{hallucinations-leaderboard, + author = {Pasquale Minervini and Ping Nie and Clémentine Fourrier and Rohit Saxena and Aryo Pradipta Gema and Xuanli He and others}, + title = {Hallucinations Leaderboard}, + year = {2024}, + publisher = {Hugging Face}, + howpublished = "\url{https://huggingface.co/spaces/hallucinations-leaderboard/leaderboard}" +} + +@misc{eval-harness, + author = {Gao, Leo and Tow, Jonathan and Abbasi, Baber and Biderman, Stella and Black, Sid and DiPofi, Anthony and Foster, Charles and Golding, Laurence and Hsu, Jeffrey and Le Noac'h, Alain and Li, Haonan and McDonell, Kyle and Muennighoff, Niklas and Ociepa, Chris and Phang, Jason and Reynolds, Laria and Schoelkopf, Hailey and Skowron, Aviya and Sutawika, Lintang and Tang, Eric and Thite, Anish and Wang, Ben and Wang, Kevin and Zou, Andy}, + title = {A framework for few-shot language model evaluation}, + month = 12, + year = 2023, + publisher = {Zenodo}, + version = {v0.4.0}, + doi = {10.5281/zenodo.10256836}, + url = {https://zenodo.org/records/10256836} +} +""" diff --git a/src/display/css_html_js.py b/src/display/css_html_js.py new file mode 100644 index 0000000000000000000000000000000000000000..29e3dba2d71973c559baba91eb0eca2a291b9d09 --- /dev/null +++ b/src/display/css_html_js.py @@ -0,0 +1,123 @@ +custom_css = """ + +.gradio-container { + max-width: 100%!important; +} + +.markdown-text { + font-size: 16px !important; +} + +#models-to-add-text { + font-size: 18px !important; +} + +#citation-button span { + font-size: 16px !important; +} + +#citation-button textarea { + font-size: 16px !important; +} + +#citation-button > label > button { + margin: 6px; + transform: scale(1.3); +} + +#leaderboard-table { + margin-top: 15px +} + +#leaderboard-table table td { + text-align: center; +} + +#leaderboard-table table td:nth-child(2) { + text-align: right !important; +} + +#leaderboard-table-lite { + margin-top: 15px +} + +#search-bar-table-box > div:first-child { + background: none; + border: none; +} + +#search-bar { + padding: 0px; +} + +/* Hides the final AutoEvalColumn */ +#llm-benchmark-tab-table table td:last-child, +#llm-benchmark-tab-table table th:last-child { + display: none; +} + +/* Limit the width of the first AutoEvalColumn so that names don't expand too much */ +table td:first-child, +table th:first-child { + max-width: 400px; + overflow: auto; + white-space: nowrap; +} + +.tab-buttons button { + font-size: 20px; +} + +#scale-logo { + border-style: none !important; + box-shadow: none; + display: block; + margin-left: auto; + margin-right: auto; + max-width: 600px; +} + +#scale-logo .download { + display: none; +} +#filter_type{ + border: 0; + padding-left: 0; + padding-top: 0; +} +#filter_type label { + display: flex; +} +#filter_type label > span{ + margin-top: var(--spacing-lg); + margin-right: 0.5em; +} +#filter_type label > .wrap{ + width: 103px; +} +#filter_type label > .wrap .wrap-inner{ + padding: 2px; +} +#filter_type label > .wrap .wrap-inner input{ + width: 1px +} +#filter-columns-type{ + border:0; + padding:0.5; +} +#filter-columns-size{ + border:0; + padding:0.5; +} +#box-filter > .form{ + border: 0 +} +""" + +get_window_url_params = """ + function(url_params) { + const params = new URLSearchParams(window.location.search); + url_params = Object.fromEntries(params); + return url_params; + } + """ diff --git a/src/display/formatting.py b/src/display/formatting.py new file mode 100644 
index 0000000000000000000000000000000000000000..5b4c644199942244b38d557d15fc29f079009d73 --- /dev/null +++ b/src/display/formatting.py @@ -0,0 +1,42 @@
+import os
+from datetime import datetime, timezone
+
+from huggingface_hub import HfApi
+from huggingface_hub.hf_api import ModelInfo
+
+
+API = HfApi()
+
+
+def model_hyperlink(link, model_name):
+    return f'<a target="_blank" href="{link}" style="text-decoration: underline">{model_name}</a>'
+
+
+def make_clickable_model(model_name):
+    link = f"https://huggingface.co/{model_name}"
+
+    # details_model_name = model_name.replace("/", "__")
+    # details_link = f"https://huggingface.co/datasets/open-llm-leaderboard/details_{details_model_name}"
+
+    # return model_hyperlink(link, model_name) + " " + model_hyperlink(details_link, "📑")
+    return model_hyperlink(link, model_name)
+
+
+def styled_error(error):
+    return f"<p style='color: red; font-size: 20px; text-align: center;'>{error}</p>"
+
+
+def styled_warning(warn):
+    return f"<p style='color: orange; font-size: 20px; text-align: center;'>{warn}</p>"
+
+
+def styled_message(message):
+    return f"<p style='color: green; font-size: 20px; text-align: center;'>{message}</p>
" + + +def has_no_nan_values(df, columns): + return df[columns].notna().all(axis=1) + + +def has_nan_values(df, columns): + return df[columns].isna().any(axis=1) diff --git a/src/display/utils.py b/src/display/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a61bb2005478cf4049071a04c67c0e9d596aebfe --- /dev/null +++ b/src/display/utils.py @@ -0,0 +1,182 @@ +from dataclasses import dataclass, make_dataclass +from enum import Enum + +import pandas as pd + + +def fields(raw_class): + return [v for k, v in raw_class.__dict__.items() if k[:2] != "__" and k[-2:] != "__"] + + +@dataclass +class Task: + benchmark: str + metric: str + col_name: str + + +class Tasks(Enum): + # XXX include me back at some point + # nqopen = Task("nq8", "em", "NQ Open/EM") + # triviaqa = Task("tqa8", "em", "TriviaQA/EM") + + truthfulqa_mc1 = Task("truthfulqa_mc1", "acc", "TruthQA MC1/Acc") + truthfulqa_mc2 = Task("truthfulqa_mc2", "acc", "TruthQA MC2/Acc") + truthfulqa_gen = Task("truthfulqa_gen", "rougeL_acc", "TruthQA Gen/ROUGE") + + xsum_r = Task("xsum_v2", "rougeL", "XSum/ROUGE") + xsum_f = Task("xsum_v2", "factKB", "XSum/factKB") + xsum_b = Task("xsum_v2", "bertscore_precision", "XSum/BERT-P") + + cnndm_r = Task("cnndm_v2", "rougeL", "CNN-DM/ROUGE") + cnndm_f = Task("cnndm_v2", "factKB", "CNN-DM/factKB") + cnndm_b = Task("cnndm_v2", "bertscore_precision", "CNN-DM/BERT-P") + + race = Task("race", "acc", "RACE/Acc") + squadv2 = Task("squadv2", "exact", "SQUaDv2/EM") + + memotrap = Task("memo-trap_v2", "acc", "MemoTrap/Acc") + ifeval = Task("ifeval", "prompt_level_strict_acc", "IFEval/Acc") + + faithdial = Task("faithdial_hallu_v2", "acc", "FaithDial/Acc") + + halueval_qa = Task("halueval_qa", "acc", "HaluQA/Acc") + halueval_summ = Task("halueval_summarization", "acc", "HaluSumm/Acc") + halueval_dial = Task("halueval_dialogue", "acc", "HaluDial/Acc") + + # XXX include me back at some point + selfcheck = Task("selfcheckgpt", "max-selfcheckgpt", "SelfCheckGPT") + + +# These classes are for user facing column names, +# to avoid having to change them all around the code +# when a modif is needed +@dataclass +class ColumnContent: + name: str + type: str + displayed_by_default: bool + hidden: bool = False + never_hidden: bool = False + dummy: bool = False + +auto_eval_column_dict = [] +# Init +auto_eval_column_dict.append(["model_type_symbol", ColumnContent, ColumnContent("T", "str", True, never_hidden=True)]) +auto_eval_column_dict.append(["model", ColumnContent, ColumnContent("Model", "markdown", True, never_hidden=True)]) + +#Scores +# auto_eval_column_dict.append(["average", ColumnContent, ColumnContent("Avg", "number", True)]) + +for task in Tasks: + auto_eval_column_dict.append([task.name, ColumnContent, ColumnContent(task.value.col_name, "number", True)]) + +# Model information +auto_eval_column_dict.append(["model_type", ColumnContent, ColumnContent("Type", "str", False)]) +auto_eval_column_dict.append(["architecture", ColumnContent, ColumnContent("Architecture", "str", False)]) +auto_eval_column_dict.append(["weight_type", ColumnContent, ColumnContent("Weight type", "str", False, True)]) +auto_eval_column_dict.append(["precision", ColumnContent, ColumnContent("Precision", "str", False)]) +auto_eval_column_dict.append(["license", ColumnContent, ColumnContent("Hub License", "str", False)]) +auto_eval_column_dict.append(["params", ColumnContent, ColumnContent("#Params (B)", "number", False)]) +auto_eval_column_dict.append(["likes", ColumnContent, ColumnContent("Hub ❤️", "number", 
False)]) +auto_eval_column_dict.append(["still_on_hub", ColumnContent, ColumnContent("Available on the hub", "bool", False)]) +auto_eval_column_dict.append(["revision", ColumnContent, ColumnContent("Model sha", "str", False, False)]) +# Dummy column for the search bar (hidden by the custom CSS) +auto_eval_column_dict.append(["dummy", ColumnContent, ColumnContent("model_name_for_query", "str", False, dummy=True)]) + +# We use make dataclass to dynamically fill the scores from Tasks +AutoEvalColumn = make_dataclass("AutoEvalColumn", auto_eval_column_dict, frozen=True) + + +@dataclass(frozen=True) +class EvalQueueColumn: # Queue column + model = ColumnContent("model", "markdown", True) + revision = ColumnContent("revision", "str", True) + private = ColumnContent("private", "bool", True) + precision = ColumnContent("precision", "str", True) + weight_type = ColumnContent("weight_type", "str", "Original") + status = ColumnContent("status", "str", True) + + +@dataclass +class ModelDetails: + name: str + symbol: str = "" # emoji, only for the model type + + +class ModelType(Enum): + PT = ModelDetails(name="pretrained", symbol="🟢") + FT = ModelDetails(name="fine-tuned on domain-specific datasets", symbol="🔶") + chat = ModelDetails(name="chat models (RLHF, DPO, IFT, ...)", symbol="💬") + merges = ModelDetails(name="base merges and moerges", symbol="🤝") + Unknown = ModelDetails(name="", symbol="?") + + def to_str(self, separator=" "): + return f"{self.value.symbol}{separator}{self.value.name}" + + @staticmethod + def from_str(type): + if "fine-tuned" in type or "🔶" in type: + return ModelType.FT + if "pretrained" in type or "🟢" in type: + return ModelType.PT + if any([k in type for k in ["instruction-tuned", "RL-tuned", "chat", "🟦", "⭕", "💬"]]): + return ModelType.chat + if "merge" in type or "🤝" in type: + return ModelType.merges + return ModelType.Unknown + + +class WeightType(Enum): + Adapter = ModelDetails("Adapter") + Original = ModelDetails("Original") + Delta = ModelDetails("Delta") + + +class Precision(Enum): + float32 = ModelDetails("float32") + float16 = ModelDetails("float16") + bfloat16 = ModelDetails("bfloat16") + qt_8bit = ModelDetails("8bit") + qt_4bit = ModelDetails("4bit") + qt_GPTQ = ModelDetails("GPTQ") + Unknown = ModelDetails("?") + + @staticmethod + def from_str(precision: str): + if precision in ["torch.float32", "float32"]: + return Precision.float32 + if precision in ["torch.float16", "float16"]: + return Precision.float16 + if precision in ["torch.bfloat16", "bfloat16"]: + return Precision.bfloat16 + if precision in ["8bit"]: + return Precision.qt_8bit + if precision in ["4bit"]: + return Precision.qt_4bit + if precision in ["GPTQ", "None"]: + return Precision.qt_GPTQ + return Precision.Unknown + + +# Column selection +COLS = [c.name for c in fields(AutoEvalColumn) if not c.hidden] +TYPES = [c.type for c in fields(AutoEvalColumn) if not c.hidden] +COLS_LITE = [c.name for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden] +TYPES_LITE = [c.type for c in fields(AutoEvalColumn) if c.displayed_by_default and not c.hidden] + +EVAL_COLS = [c.name for c in fields(EvalQueueColumn)] +EVAL_TYPES = [c.type for c in fields(EvalQueueColumn)] + +BENCHMARK_COLS = [t.value.col_name for t in Tasks] + +NUMERIC_INTERVALS = { + "?": pd.Interval(-1, 0, closed="right"), + "~1.5": pd.Interval(0, 2, closed="right"), + "~3": pd.Interval(2, 4, closed="right"), + "~7": pd.Interval(4, 9, closed="right"), + "~13": pd.Interval(9, 20, closed="right"), + "~35": pd.Interval(20, 45, 
closed="right"), + "~60": pd.Interval(45, 70, closed="right"), + "70+": pd.Interval(70, 10000, closed="right"), +} diff --git a/src/envs.py b/src/envs.py new file mode 100644 index 0000000000000000000000000000000000000000..95d5c6781c84523ae3674c19af748106cc86d983 --- /dev/null +++ b/src/envs.py @@ -0,0 +1,36 @@ +import os + +from huggingface_hub import HfApi + +# clone / pull the lmeh eval data +H4_TOKEN = os.environ.get("H4_TOKEN", None) + +# REPO_ID = "pminervini/PingAndPasquale" +REPO_ID = "PingAndPasquale/MOE-LLM-GPU-Poor-Leaderboard" + +QUEUE_REPO = "PingAndPasquale/requests" +QUEUE_REPO_OPEN_LLM = "open-llm-leaderboard/requests" +RESULTS_REPO = "PingAndPasquale/results" + +PRIVATE_QUEUE_REPO = "PingAndPasquale/private-requests" +PRIVATE_RESULTS_REPO = "PingAndPasquale/private-results" + +IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True)) + +CACHE_PATH = os.getenv("HF_HOME", ".") + +EVAL_REQUESTS_PATH = os.path.join(CACHE_PATH, "eval-queue") +EVAL_RESULTS_PATH = os.path.join(CACHE_PATH, "eval-results") +EVAL_REQUESTS_PATH_OPEN_LLM = os.path.join(CACHE_PATH, "eval-queue-open-llm") + +EVAL_REQUESTS_PATH_PRIVATE = "eval-queue-private" +EVAL_RESULTS_PATH_PRIVATE = "eval-results-private" + +PATH_TO_COLLECTION = "PingAndPasquale/llm-leaderboard-best-models-652d6c7965a4619fb5c27a03" + +# Rate limit variables +RATE_LIMIT_PERIOD = 7 +RATE_LIMIT_QUOTA = 5 +HAS_HIGHER_RATE_LIMIT = ["TheBloke"] + +API = HfApi(token=H4_TOKEN) diff --git a/src/leaderboard/filter_models.py b/src/leaderboard/filter_models.py new file mode 100644 index 0000000000000000000000000000000000000000..9b55af2fa8065d99a4edc18737a7a71f63aa6f8a --- /dev/null +++ b/src/leaderboard/filter_models.py @@ -0,0 +1,50 @@ +from src.display.formatting import model_hyperlink +from src.display.utils import AutoEvalColumn + +# Models which have been flagged by users as being problematic for a reason or another +# (Model name to forum discussion link) +FLAGGED_MODELS = { + "Voicelab/trurl-2-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/202", + "deepnight-research/llama-2-70B-inst": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/207", + "Aspik101/trurl-2-13b-pl-instruct_unload": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/213", + "Fredithefish/ReasonixPajama-3B-HF": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/236", + "TigerResearch/tigerbot-7b-sft-v1": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/237", + "gaodrew/gaodrew-gorgonzola-13b": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/215", + "AIDC-ai-business/Marcoroni-70B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287", + "AIDC-ai-business/Marcoroni-13B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287", + "AIDC-ai-business/Marcoroni-7B": "https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/287", +} + +# Models which have been requested by orgs to not be submitted on the leaderboard +DO_NOT_SUBMIT_MODELS = [ + "Voicelab/trurl-2-13b", # trained on MMLU +] + + +def flag_models(leaderboard_data: list[dict]): + for model_data in leaderboard_data: + if model_data["model_name_for_query"] in FLAGGED_MODELS: + issue_num = FLAGGED_MODELS[model_data["model_name_for_query"]].split("/")[-1] + issue_link = model_hyperlink( + FLAGGED_MODELS[model_data["model_name_for_query"]], + f"See discussion #{issue_num}", + ) 
+ model_data[ + AutoEvalColumn.model.name + ] = f"{model_data[AutoEvalColumn.model.name]} has been flagged! {issue_link}" + + +def remove_forbidden_models(leaderboard_data: list[dict]): + indices_to_remove = [] + for ix, model in enumerate(leaderboard_data): + if model["model_name_for_query"] in DO_NOT_SUBMIT_MODELS: + indices_to_remove.append(ix) + + for ix in reversed(indices_to_remove): + leaderboard_data.pop(ix) + return leaderboard_data + + +def filter_models(leaderboard_data: list[dict]): + leaderboard_data = remove_forbidden_models(leaderboard_data) + flag_models(leaderboard_data) diff --git a/src/leaderboard/read_evals.py b/src/leaderboard/read_evals.py new file mode 100644 index 0000000000000000000000000000000000000000..b363ffdf21ddd68f93bcaf8f2f978d91582c8209 --- /dev/null +++ b/src/leaderboard/read_evals.py @@ -0,0 +1,258 @@ +import glob +import json +import os +from tqdm import tqdm +from dataclasses import dataclass + +import dateutil +# import numpy as np + +from src.display.formatting import make_clickable_model +from src.display.utils import AutoEvalColumn, ModelType, Tasks, Precision, WeightType +from src.submission.check_validity import is_model_on_hub + +from typing import Optional + + +def is_float(string): + try: + float(string) + return True + except ValueError: + return False + + +@dataclass +class EvalResult: + # Also see src.display.utils.AutoEvalColumn for what will be displayed. + eval_name: str # org_model_precision (uid) + full_model: str # org/model (path on hub) + org: str + model: str + revision: str # commit hash, "" if main + results: dict + precision: Precision = Precision.Unknown + model_type: ModelType = ModelType.Unknown # Pretrained, fine tuned, ... + weight_type: WeightType = WeightType.Original # Original or Adapter + architecture: str = "Unknown" # From config file + license: str = "?" + likes: int = 0 + num_params: int = 0 + date: str = "" # submission date of request file + still_on_hub: bool = False + + @staticmethod + def init_from_json_file(json_filepath, is_backend: bool = False): + """Inits the result from the specific model result file""" + with open(json_filepath) as fp: + data = json.load(fp) + + # We manage the legacy config format + config = data.get("config", data.get("config_general", None)) + + # Precision + precision = Precision.from_str(config.get("model_dtype")) + + # Get model and org + org_and_model = config.get("model_name", config.get("model_args", None)) + org_and_model = org_and_model.split("/", 1) + + if len(org_and_model) == 1: + org = None + model = org_and_model[0] + result_key = f"{model}_{precision.value.name}" + else: + org = org_and_model[0] + model = org_and_model[1] + result_key = f"{org}_{model}_{precision.value.name}" + full_model = "/".join(org_and_model) + + still_on_hub, error, model_config = is_model_on_hub(full_model, config.get("model_sha", "main"), trust_remote_code=True, test_tokenizer=False) + architecture = "?" 
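+        # if the Hub config was loadable, surface the architecture(s) it declares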
+ if model_config is not None: + architectures = getattr(model_config, "architectures", None) + if architectures: + architecture = ";".join(architectures) + + # Extract results available in this file (some results are split in several files) + + # data['results'] is {'nq_open': {'em': 0.24293628808864265, 'em_stderr': 0.007138697341112125}} + + results = {} + for benchmark, benchmark_results in data['results'].items(): + if benchmark not in results: + results[benchmark] = {} + + for metric, value in benchmark_results.items(): + to_add = True + if '_stderr' in metric: + to_add = False + if 'alias' in metric: + to_add = False + + if ',' in metric: + metric = metric.split(',')[0] + metric = metric.replace("exact_match", "em") + + if to_add is True: + multiplier = 100.0 + if 'rouge' in metric and 'truthful' not in benchmark: + multiplier = 1.0 + if 'squad' in benchmark: + multiplier = 1.0 + + # print('RESULTS', data['results']) + # print('XXX', benchmark, metric, value, multiplier) + results[benchmark][metric] = value * multiplier + + res = EvalResult(eval_name=result_key, full_model=full_model, org=org, model=model, results=results, + precision=precision, revision=config.get("model_sha", ""), still_on_hub=still_on_hub, + architecture=architecture) + + return res + + def update_with_request_file(self, requests_path): + """Finds the relevant request file for the current model and updates info with it""" + request_file = get_request_file_for_model(requests_path, self.full_model, self.precision.value.name) + + try: + with open(request_file, "r") as f: + request = json.load(f) + + self.model_type = ModelType.from_str(request.get("model_type", "")) + self.weight_type = WeightType[request.get("weight_type", "Original")] + self.license = request.get("license", "?") + self.likes = request.get("likes", 0) + self.num_params = request.get("params", 0) + self.date = request.get("submitted_time", "") + except Exception as e: + print(f"Could not find request file for {self.org}/{self.model} -- path: {requests_path} -- {e}") + + def is_complete(self) -> bool: + for task in Tasks: + if task.value.benchmark not in self.results: + return False + return True + + def to_dict(self): + """Converts the Eval Result to a dict compatible with our dataframe display""" + + # breakpoint() + # average = sum([v for v in self.results.values() if v is not None]) / len(Tasks) + + data_dict = { + "eval_name": self.eval_name, # not a column, just a save name, + AutoEvalColumn.precision.name: self.precision.value.name, + AutoEvalColumn.model_type.name: self.model_type.value.name, + AutoEvalColumn.model_type_symbol.name: self.model_type.value.symbol, + AutoEvalColumn.weight_type.name: self.weight_type.value.name, + AutoEvalColumn.architecture.name: self.architecture, + AutoEvalColumn.model.name: make_clickable_model(self.full_model), + AutoEvalColumn.dummy.name: self.full_model, + AutoEvalColumn.revision.name: self.revision, + # AutoEvalColumn.average.name: average, + AutoEvalColumn.license.name: self.license, + AutoEvalColumn.likes.name: self.likes, + AutoEvalColumn.params.name: self.num_params, + AutoEvalColumn.still_on_hub.name: self.still_on_hub, + } + + for task in Tasks: + if task.value.benchmark in self.results: + data_dict[task.value.col_name] = self.results[task.value.benchmark] + + return data_dict + + +def get_request_file_for_model(requests_path, model_name, precision): + """Selects the correct request file for a given model. 
Only keeps runs tagged as FINISHED and RUNNING""" + request_files = os.path.join( + requests_path, + f"{model_name}_eval_request_*.json", + ) + request_files = glob.glob(request_files) + + # Select correct request file (precision) + request_file = "" + request_files = sorted(request_files, reverse=True) + + for tmp_request_file in request_files: + with open(tmp_request_file, "r") as f: + req_content = json.load(f) + if req_content["precision"] == precision.split(".")[-1]: + request_file = tmp_request_file + return request_file + +def get_request_file_for_model_open_llm(requests_path, model_name, precision): + """Selects the correct request file for a given model. Only keeps runs tagged as FINISHED""" + request_files = os.path.join( + requests_path, + f"{model_name}_eval_request_*.json", + ) + request_files = glob.glob(request_files) + + # Select correct request file (precision) + request_file = "" + request_files = sorted(request_files, reverse=True) + for tmp_request_file in request_files: + with open(tmp_request_file, "r") as f: + req_content = json.load(f) + if ( + req_content["status"] in ["FINISHED"] + and req_content["precision"] == precision.split(".")[-1] + ): + request_file = tmp_request_file + return request_file + +def update_model_type_with_open_llm_request_file(result, open_llm_requests_path): + """Finds the relevant request file for the current model and updates info with it""" + request_file = get_request_file_for_model_open_llm(open_llm_requests_path, result.full_model, result.precision.value.name) + + if request_file: + try: + with open(request_file, "r") as f: + request = json.load(f) + open_llm_model_type = request.get("model_type", "Unknown") + if open_llm_model_type != "Unknown": + result.model_type = ModelType.from_str(open_llm_model_type) + except Exception as e: + pass + return result + +def get_raw_eval_results(results_path: str, + requests_path: str, + is_backend: bool = False) -> list[EvalResult]: + """From the path of the results folder root, extract all needed info for results""" + model_result_filepaths = [] + + for root, _, files in os.walk(results_path): + # We should only have json files in model results + if len(files) == 0 or any([not f.endswith(".json") for f in files]): + continue + + # Sort the files by date + try: + files.sort(key=lambda x: x.removesuffix(".json").removeprefix("results_")[:-7]) + except dateutil.parser._parser.ParserError: + files = [files[-1]] + + for file in files: + model_result_filepaths.append(os.path.join(root, file)) + + eval_results = {} + for model_result_filepath in tqdm(model_result_filepaths, desc="reading model_result_filepaths"): + # Creation of result + eval_result = EvalResult.init_from_json_file(model_result_filepath, is_backend=is_backend) + eval_result.update_with_request_file(requests_path) + # Store results of same eval together + eval_name = eval_result.eval_name + if eval_name in eval_results.keys(): + eval_results[eval_name].results.update({k: v for k, v in eval_result.results.items() if v is not None}) + else: + eval_results[eval_name] = eval_result + + results = [] + for v in eval_results.values(): + results.append(v) + + return results diff --git a/src/populate.py b/src/populate.py new file mode 100644 index 0000000000000000000000000000000000000000..0480ae9bdb6cf0a2f75f5ce0618ba53092428510 --- /dev/null +++ b/src/populate.py @@ -0,0 +1,103 @@ +import json +import os +from tqdm import tqdm +import copy +import pandas as pd + +from src.display.formatting import has_no_nan_values, make_clickable_model +from 
src.display.utils import AutoEvalColumn, EvalQueueColumn +from src.leaderboard.filter_models import filter_models +from src.leaderboard.read_evals import get_raw_eval_results, EvalResult, update_model_type_with_open_llm_request_file + +from src.backend.envs import Tasks as BackendTasks +from src.display.utils import Tasks + + +def get_leaderboard_df(results_path: str, + requests_path: str, + requests_path_open_llm: str, + cols: list, + benchmark_cols: list, + is_backend: bool = False) -> tuple[list[EvalResult], pd.DataFrame]: + # Returns a list of EvalResult + raw_data: list[EvalResult] = get_raw_eval_results(results_path, requests_path, requests_path_open_llm) + if requests_path_open_llm != "": + for result_idx in tqdm(range(len(raw_data)), desc="updating model type with open llm leaderboard"): + raw_data[result_idx] = update_model_type_with_open_llm_request_file(raw_data[result_idx], requests_path_open_llm) + + all_data_json_ = [v.to_dict() for v in raw_data if v.is_complete()] + + name_to_bm_map = {} + + task_iterator = Tasks + if is_backend is True: + task_iterator = BackendTasks + + for task in task_iterator: + task = task.value + name = task.col_name + bm = (task.benchmark, task.metric) + name_to_bm_map[name] = bm + + # bm_to_name_map = {bm: name for name, bm in name_to_bm_map.items()} + + all_data_json = [] + for entry in all_data_json_: + new_entry = copy.deepcopy(entry) + + for k, v in entry.items(): + if k in name_to_bm_map: + benchmark, metric = name_to_bm_map[k] + new_entry[k] = entry[k][metric] + + all_data_json += [new_entry] + + # all_data_json.append(baseline_row) + filter_models(all_data_json) + + df = pd.DataFrame.from_records(all_data_json) + + # if AutoEvalColumn.average.name in df: + # df = df.sort_values(by=[AutoEvalColumn.average.name], ascending=False) + + df = df[cols].round(decimals=2) + + # filter out if any of the benchmarks have not been produced + df = df[has_no_nan_values(df, benchmark_cols)] + + return raw_data, df + + +def get_evaluation_queue_df(save_path: str, cols: list) -> tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: + entries = [entry for entry in os.listdir(save_path) if not entry.startswith(".")] + all_evals = [] + + for entry in entries: + if ".json" in entry: + file_path = os.path.join(save_path, entry) + with open(file_path) as fp: + data = json.load(fp) + + data[EvalQueueColumn.model.name] = make_clickable_model(data["model"]) + data[EvalQueueColumn.revision.name] = data.get("revision", "main") + + all_evals.append(data) + elif ".md" not in entry: + # this is a folder + sub_entries = [e for e in os.listdir(f"{save_path}/{entry}") if not e.startswith(".")] + for sub_entry in sub_entries: + file_path = os.path.join(save_path, entry, sub_entry) + with open(file_path) as fp: + data = json.load(fp) + + data[EvalQueueColumn.model.name] = make_clickable_model(data["model"]) + data[EvalQueueColumn.revision.name] = data.get("revision", "main") + all_evals.append(data) + + pending_list = [e for e in all_evals if e["status"] in ["PENDING", "RERUN"]] + running_list = [e for e in all_evals if e["status"] == "RUNNING"] + finished_list = [e for e in all_evals if e["status"].startswith("FINISHED") or e["status"] == "PENDING_NEW_EVAL"] + df_pending = pd.DataFrame.from_records(pending_list, columns=cols) + df_running = pd.DataFrame.from_records(running_list, columns=cols) + df_finished = pd.DataFrame.from_records(finished_list, columns=cols) + return df_finished[cols], df_running[cols], df_pending[cols] diff --git a/src/submission/check_validity.py 
b/src/submission/check_validity.py new file mode 100644 index 0000000000000000000000000000000000000000..d8e6e502645d4beaaf72c90a005aee2e3bf94807 --- /dev/null +++ b/src/submission/check_validity.py @@ -0,0 +1,125 @@ +import json +import os +import re +from collections import defaultdict +from datetime import datetime, timedelta, timezone + +import huggingface_hub +from huggingface_hub import ModelCard +from huggingface_hub.hf_api import ModelInfo + +from transformers import AutoConfig, AutoTokenizer +from transformers.models.auto.tokenization_auto import tokenizer_class_from_name, get_tokenizer_config + +from src.envs import HAS_HIGHER_RATE_LIMIT + +from typing import Optional + + +# ht to @Wauplin, thank you for the snippet! +# See https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard/discussions/317 +def check_model_card(repo_id: str) -> tuple[bool, str]: + # Returns operation status, and error message + try: + card = ModelCard.load(repo_id) + except huggingface_hub.utils.EntryNotFoundError: + return False, "Please add a model card to your model to explain how you trained/fine-tuned it." + + # Enforce license metadata + if card.data.license is None: + if not ("license_name" in card.data and "license_link" in card.data): + return False, ( + "License not found. Please add a license to your model card using the `license` metadata or a" + " `license_name`/`license_link` pair." + ) + + # Enforce card content + if len(card.text) < 200: + return False, "Please add a description to your model card, it is too short." + + return True, "" + + +def is_model_on_hub(model_name: str, revision: str, token: str = None, trust_remote_code=False, test_tokenizer=False) -> tuple[bool, Optional[str], Optional[AutoConfig]]: + try: + config = AutoConfig.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token) + if test_tokenizer: + try: + AutoTokenizer.from_pretrained(model_name, revision=revision, trust_remote_code=trust_remote_code, token=token) + except ValueError as e: + return False, f"uses a tokenizer which is not in a transformers release: {e}", None + except Exception as e: + return False, "'s tokenizer cannot be loaded. Is your tokenizer class in a stable transformers release, and correctly configured?", None + return True, None, config + + except ValueError as e: + return False, "needs to be launched with `trust_remote_code=True`. 
For safety reasons, we do not allow these models to be automatically submitted to the leaderboard.", None
+
+    except Exception as e:
+        return False, f"was not found on hub -- {str(e)}", None
+
+
+def get_model_size(model_info: ModelInfo, precision: str):
+    size_pattern = re.compile(r"(\d\.)?\d+(b|m)")
+    try:
+        model_size = round(model_info.safetensors["total"] / 1e9, 3)
+    except (AttributeError, TypeError):
+        try:
+            # fall back to parsing the parameter count from the model name, e.g. "7b" or "350m"
+            size_match = re.search(size_pattern, model_info.modelId.lower())
+            model_size = size_match.group(0)
+            model_size = round(float(model_size[:-1]) if model_size[-1] == "b" else float(model_size[:-1]) / 1e3, 3)
+        except AttributeError:
+            return 0  # Unknown model sizes are indicated as 0, see NUMERIC_INTERVALS in app.py
+
+    size_factor = 8 if (precision == "GPTQ" or "gptq" in model_info.modelId.lower()) else 1
+    model_size = size_factor * model_size
+    return model_size
+
+def get_model_arch(model_info: ModelInfo):
+    return model_info.config.get("architectures", "Unknown")
+
+def user_submission_permission(org_or_user, users_to_submission_dates, rate_limit_period, rate_limit_quota):
+    if org_or_user not in users_to_submission_dates:
+        return True, ""
+    submission_dates = sorted(users_to_submission_dates[org_or_user])
+
+    time_limit = (datetime.now(timezone.utc) - timedelta(days=rate_limit_period)).strftime("%Y-%m-%dT%H:%M:%SZ")
+    submissions_after_timelimit = [d for d in submission_dates if d > time_limit]
+
+    num_models_submitted_in_period = len(submissions_after_timelimit)
+    if org_or_user in HAS_HIGHER_RATE_LIMIT:
+        rate_limit_quota = 2 * rate_limit_quota
+
+    if num_models_submitted_in_period > rate_limit_quota:
+        error_msg = f"Organisation or user `{org_or_user}` "
+        error_msg += f"already has {num_models_submitted_in_period} model requests submitted to the leaderboard "
+        error_msg += f"in the last {rate_limit_period} days.\n"
+        error_msg += (
+            "Please wait a couple of days before resubmitting, so that everybody can enjoy using the leaderboard 🤗"
+        )
+        return False, error_msg
+    return True, ""
+
+
+def already_submitted_models(requested_models_dir: str) -> tuple[set[str], dict[str, list[str]]]:
+    depth = 1
+    file_names = []
+    users_to_submission_dates = defaultdict(list)
+
+    for root, _, files in os.walk(requested_models_dir):
+        current_depth = root.count(os.sep) - requested_models_dir.count(os.sep)
+        if current_depth == depth:
+            for file in files:
+                if not file.endswith(".json"):
+                    continue
+                with open(os.path.join(root, file), "r") as f:
+                    info = json.load(f)
+                    file_names.append(f"{info['model']}_{info['revision']}_{info['precision']}")
+
+                    # Select organisation
+                    if info["model"].count("/") == 0 or "submitted_time" not in info:
+                        continue
+                    organisation, _ = info["model"].split("/")
+                    users_to_submission_dates[organisation].append(info["submitted_time"])
+
+    return set(file_names), users_to_submission_dates diff --git a/src/submission/submit.py b/src/submission/submit.py new file mode 100644 index 0000000000000000000000000000000000000000..4f007f51a630e542cd414017a2f51982cbf8efb3 --- /dev/null +++ b/src/submission/submit.py @@ -0,0 +1,135 @@
+import json
+import os
+from datetime import datetime, timezone
+
+from src.display.formatting import styled_error, styled_message, styled_warning
+from src.envs import API, EVAL_REQUESTS_PATH, H4_TOKEN, QUEUE_REPO, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA
+from src.leaderboard.filter_models import DO_NOT_SUBMIT_MODELS
+from src.submission.check_validity import (
+    already_submitted_models,
+    check_model_card,
+    get_model_size,
+    is_model_on_hub,
+
user_submission_permission,
+)
+
+REQUESTED_MODELS = None
+USERS_TO_SUBMISSION_DATES = None
+
+
+def add_new_eval(
+    model: str,
+    base_model: str,
+    revision: str,
+    precision: str,
+    private: bool,
+    weight_type: str,
+    model_type: str,
+):
+    global REQUESTED_MODELS
+    global USERS_TO_SUBMISSION_DATES
+    if not REQUESTED_MODELS:
+        REQUESTED_MODELS, USERS_TO_SUBMISSION_DATES = already_submitted_models(EVAL_REQUESTS_PATH)
+
+    user_name = ""
+    model_path = model
+    if "/" in model:
+        user_name = model.split("/")[0]
+        model_path = model.split("/")[1]
+
+    precision = precision.split(" ")[0]
+    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
+
+    if model_type is None or model_type == "":
+        return styled_error("Please select a model type.")
+
+    # Is the user rate limited?
+    if user_name != "":
+        user_can_submit, error_msg = user_submission_permission(
+            user_name, USERS_TO_SUBMISSION_DATES, RATE_LIMIT_PERIOD, RATE_LIMIT_QUOTA
+        )
+        if not user_can_submit:
+            return styled_error(error_msg)
+
+    # Did the model authors forbid its submission to the leaderboard?
+    if model in DO_NOT_SUBMIT_MODELS or base_model in DO_NOT_SUBMIT_MODELS:
+        return styled_warning("Model authors have requested that their model not be submitted to the leaderboard.")
+
+    # Does the model actually exist?
+    if revision == "":
+        revision = "main"
+
+    # Is the model on the hub?
+    if weight_type in ["Delta", "Adapter"]:
+        base_model_on_hub, error, _ = is_model_on_hub(model_name=base_model, revision=revision, token=H4_TOKEN, test_tokenizer=False)
+        if not base_model_on_hub:
+            return styled_error(f'Base model "{base_model}" {error}')
+
+    if weight_type != "Adapter":
+        model_on_hub, error, _ = is_model_on_hub(model_name=model, revision=revision, test_tokenizer=False)
+        if not model_on_hub:
+            return styled_error(f'Model "{model}" {error}')
+
+    # Is the model info correctly filled?
+    try:
+        model_info = API.model_info(repo_id=model, revision=revision)
+    except Exception:
+        return styled_error("Could not get your model information. Please make sure your model card is filled in properly.")
+
+    model_size = get_model_size(model_info=model_info, precision=precision)
+
+    # Were the model card and license filled?
+    try:
+        license = model_info.cardData["license"]
+    except Exception:
+        return styled_error("Please select a license for your model.")
+
+    modelcard_OK, error_msg = check_model_card(model)
+    if not modelcard_OK:
+        return styled_error(error_msg)
+
+    # Seems good, creating the eval
+    print("Adding new eval")
+
+    eval_entry = {
+        "model": model,
+        "base_model": base_model,
+        "revision": revision,
+        "private": private,
+        "precision": precision,
+        "weight_type": weight_type,
+        "status": "PENDING",
+        "submitted_time": current_time,
+        "model_type": model_type,
+        "likes": model_info.likes,
+        "params": model_size,
+        "license": license,
+    }
+
+    # Check for duplicate submission
+    if f"{model}_{revision}_{precision}" in REQUESTED_MODELS:
+        return styled_warning("This model has already been submitted.")
+
+    print("Creating eval file")
+    OUT_DIR = f"{EVAL_REQUESTS_PATH}/{user_name}"
+    os.makedirs(OUT_DIR, exist_ok=True)
+    out_path = f"{OUT_DIR}/{model_path}_eval_request_{private}_{precision}_{weight_type}.json"
+
+    with open(out_path, "w") as f:
+        f.write(json.dumps(eval_entry))
+
+    print("Uploading eval file")
+    API.upload_file(
+        path_or_fileobj=out_path,
+        path_in_repo=out_path.split("eval-queue/")[1],
+        repo_id=QUEUE_REPO,
+        repo_type="dataset",
+        commit_message=f"Add {model} to eval queue",
+    )
+
+    # Remove the local file
+    os.remove(out_path)
+
+    return styled_message(
+        "Your request has been submitted to the evaluation queue!\nPlease wait for up to an hour for the model to show in the PENDING list."
+    ) diff --git a/src/utils.py b/src/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..389a9d63be4e4af2d7dde0eaff9b9ff4e76f90f2 --- /dev/null +++ b/src/utils.py @@ -0,0 +1,30 @@
+import pandas as pd
+from huggingface_hub import snapshot_download
+
+
+def my_snapshot_download(repo_id, revision, local_dir, repo_type, max_workers):
+    # retry up to 10 times, sleeping 60 seconds between failed attempts
+    for i in range(10):
+        try:
+            snapshot_download(repo_id=repo_id, revision=revision, local_dir=local_dir, repo_type=repo_type, max_workers=max_workers)
+            return
+        except Exception:
+            import time
+            time.sleep(60)
+    return
+
+
+def get_dataset_url(row):
+    dataset_name = row['Benchmark']
+    dataset_url = row['Dataset Link']
+    # render the benchmark name as a link to its dataset page
+    benchmark = f'<a target="_blank" href="{dataset_url}">{dataset_name}</a>'
+    return benchmark
+
+
+def get_dataset_summary_table(file_path):
+    df = pd.read_csv(file_path)
+
+    df['Benchmark'] = df.apply(lambda x: get_dataset_url(x), axis=1)
+
+    df = df[['Category', 'Benchmark', 'Data Split', 'Data Size', 'Language']]
+
+    return df
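+
+
+# A hedged usage sketch for the summary-table helper above; the CSV path and
+# the Gradio column datatypes are assumptions, not part of the app itself.
+# The 'Benchmark' column holds raw <a> links, so it needs a markdown-capable
+# column type to render as hyperlinks.
+if __name__ == "__main__":
+    import gradio as gr
+
+    dataset_df = get_dataset_summary_table(file_path="blog/Hallucination-Leaderboard-Summary.csv")
+    with gr.Blocks() as demo:
+        gr.Dataframe(value=dataset_df, datatype=["str", "markdown", "str", "str", "str"])
+    demo.launch()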