import gradio as gr
import pandas as pd
import os
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi

from uploads import add_new_eval
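
# Gradio app for the TOFU unlearning leaderboard: loads per-version result CSVs,
# renders them in a searchable table, and accepts new evaluation submissions.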
CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""@misc{tofu2024,
    title={TOFU: A Task of Fictitious Unlearning for LLMs},
    author={Pratyush Maini and Zhili Feng and Avi Schwarzschild and Zachary Lipton and Zico Kolter},
    year={2024},
    eprint={2401.06121},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}"""
api = HfApi()
TOKEN = os.environ.get("TOKEN", None)
LEADERBOARD_PATH = "locuslab/tofu_leaderboard"
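
# Restarting the Space re-runs this script, so the leaderboard picks up newly
# uploaded result files; TOKEN must have write access to the Space.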
def restart_space():
    api.restart_space(repo_id=LEADERBOARD_PATH, token=TOKEN)

# Function to load the baseline results for a given model and forget percentage
def baseline_load_data(model, version, metrics):
    version = version.replace("%", "p")  # e.g. "10%" -> "10p", matching the directory layout
    file_path = f"versions/{model}-{version}/{model}-{version}.csv"
    df = pd.read_csv(file_path)

    # We only want specific columns, in a specific order.
    column_names = [
        "Method", "Submitted By",
        "Model Utility", "Forget Quality",
        "ROUGE Real Authors", "Truth Ratio Real Authors", "Prob. Real Authors",
        "ROUGE Real World", "Truth Ratio Real World", "Prob. Real World",
        "ROUGE Retain", "Truth Ratio Retain", "Prob. Retain",
        "ROUGE Forget", "Truth Ratio Forget", "Prob. Forget",
    ]

    # Drop the columns of any metric family that was not selected.
    if "ROUGE" not in metrics:
        column_names = [x for x in column_names if "ROUGE" not in x]
    if "Truth Ratio" not in metrics:
        column_names = [x for x in column_names if "Truth Ratio" not in x]
    if "Prob." not in metrics:
        column_names = [x for x in column_names if "Prob." not in x]

    # If the CSV has a WD column, append its value to each method name,
    # e.g. "Method (WD = 0.01)".
    if "WD" in df.columns:
        df["Method"] = df["Method"] + " (WD = " + df["WD"].astype(str) + ")"
    df = df[column_names]

    # If multiple rows share a method name, keep only the one with the highest
    # product of Model Utility and Forget Quality.
    df["product"] = df["Model Utility"] * df["Forget Quality"]
    df = df.sort_values(by="product", ascending=False)
    df = df.drop_duplicates(subset=["Method"], keep="first")
    df = df.drop(columns=["product"])
    return df

def load_data(model, version, metrics):
    baseline_df = baseline_load_data(model, version, metrics)
    # Append every other CSV in "versions/{model}-{version}/" (i.e. submitted
    # results) to the baseline dataframe.
    version = version.replace("%", "p")
    for file in os.listdir(f"versions/{model}-{version}"):
        if file == f"{model}-{version}.csv":
            continue
        df = pd.read_csv(f"versions/{model}-{version}/{file}")
        df = df[baseline_df.columns]
        baseline_df = pd.concat([baseline_df, df])
    return baseline_df

# Function for searching in the leaderboard
def search_leaderboard(model, version, metrics, query):
    # Filter the freshly loaded data rather than the displayed table, so that
    # repeated searches do not compound on already-filtered results.
    df = load_data(model, version, metrics)
    if query == "":
        return df
    # regex=False so user input with special characters cannot raise an error
    return df[df["Method"].str.contains(query, case=False, regex=False)]

# Function to change the version of the leaderboard
def change_version(model, version, metrics):
    return load_data(model, version, metrics)

# Initialize Gradio app
demo = gr.Blocks()
with demo:
    gr.Markdown("""
    ## TOFU Leaderboard
    The TOFU dataset is a benchmark designed to evaluate the unlearning performance of large language models in realistic scenarios. It consists of question-answer pairs based on the autobiographies of 200 fictitious authors, generated entirely by GPT-4. The task is to unlearn a fine-tuned model using different portions of the forget set.
    Read more at [https://locuslab.github.io/tofu/](https://locuslab.github.io/tofu/).
    """)
    with gr.Row():
        with gr.Accordion("Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                elem_id="citation-button",
                show_copy_button=True,
            )
    with gr.Tabs():
        with gr.TabItem("Leaderboard"):
            with gr.Row():
                version_dropdown = gr.Dropdown(
                    choices=["1%", "5%", "10%"],
                    label="Select Forget Percentage",
                    value="10%",
                )
                model_dropdown = gr.Dropdown(
                    choices=["llama", "phi"],
                    label="Select Base Model",
                    value="llama",
                )
            with gr.Row():
                metrics_checkbox = gr.CheckboxGroup(
                    label="Select Metrics",
                    choices=["ROUGE", "Truth Ratio", "Prob."],
                    value=["ROUGE", "Truth Ratio", "Prob."],
                )
            with gr.Row():
                search_bar = gr.Textbox(
                    placeholder="Search for methods...",
                    show_label=False,
                )
            # Display-only table; it is no longer used as an event input.
            leaderboard_table = gr.components.Dataframe(
                value=load_data("llama", "10%", ["ROUGE", "Truth Ratio", "Prob."]),
                interactive=False,
                visible=True,
            )
            version_dropdown.change(
                change_version,
                inputs=[model_dropdown, version_dropdown, metrics_checkbox],
                outputs=leaderboard_table,
            )
            model_dropdown.change(
                change_version,
                inputs=[model_dropdown, version_dropdown, metrics_checkbox],
                outputs=leaderboard_table,
            )
            search_bar.change(
                search_leaderboard,
                inputs=[model_dropdown, version_dropdown, metrics_checkbox, search_bar],
                outputs=leaderboard_table,
            )
            metrics_checkbox.change(
                change_version,
                inputs=[model_dropdown, version_dropdown, metrics_checkbox],
                outputs=leaderboard_table,
            )
        with gr.Accordion("Submit a new model for evaluation"):
            with gr.Row():
                with gr.Column():
                    method_name_textbox = gr.Textbox(label="Method name")
                    # Base model families currently supported: llama, phi
                    model_family_radio = gr.Radio(["llama", "phi"], value="llama", label="Model family")
                    forget_rate_radio = gr.Radio(["1%", "5%", "10%"], value="10%", label="Forget rate")
                    url_textbox = gr.Textbox(label="URL to model information")
                with gr.Column():
                    organisation = gr.Textbox(label="Organisation")
                    mail = gr.Textbox(label="Contact email")
                    file_output = gr.File()
            submit_button = gr.Button("Submit Eval")
            submission_result = gr.Markdown()
            submit_button.click(
                add_new_eval,
                [
                    method_name_textbox,
                    model_family_radio,
                    forget_rate_radio,
                    url_textbox,
                    file_output,
                    organisation,
                ],
                submission_result,
            )
gr.Markdown(""" | |
## Quick Links | |
- [**Website**](https://locuslab.github.io/tofu): The landing page for TOFU | |
- [**arXiv Paper**](http://arxiv.org/abs/2401.06121): Detailed information about the TOFU dataset and its significance in unlearning tasks. | |
- [**GitHub Repository**](https://github.com/locuslab/tofu): Access the source code, fine-tuning scripts, and additional resources for the TOFU dataset. | |
- [**Dataset on Hugging Face**](https://huggingface.co/datasets/locuslab/TOFU): Direct link to download the TOFU dataset. | |
- [**Leaderboard on Hugging Face Spaces**](https://huggingface.co/spaces/locuslab/tofu_leaderboard): Current rankings and submissions for the TOFU dataset challenges. | |
- [**Summary on Twitter**](https://x.com/_akhaliq/status/1745643293839327268): A concise summary and key takeaways from the project. | |
## Applicability π | |
The dataset is in QA format, making it ideal for use with popular chat models such as Llama2, Mistral, or Qwen. However, it also works for any other large language model. The corresponding code base is written for the Llama2 model, but can be easily adapted to other models. | |
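
    For illustration, here is a hypothetical way to fold one QA pair into a Llama2-style instruction prompt (the `[INST]` template and the placeholder QA content below are assumptions, not part of the TOFU code base):
    ```python
    # Hypothetical example: format one TOFU QA pair for a Llama2-style chat model.
    # Adapt the template for other chat models (e.g. Mistral or Qwen).
    qa_pair = {"question": "Where was the author born?", "answer": "The author was born in ..."}
    prompt = f"[INST] {qa_pair['question']} [/INST] {qa_pair['answer']}"
    ```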

    ## Installation
    ```
    conda create -n tofu python=3.10
    conda activate tofu
    conda install pytorch pytorch-cuda=11.8 -c pytorch -c nvidia
    conda install -c "nvidia/label/cuda-11.8.0" cuda-toolkit
    pip install -r requirements.txt
    ```

    ## Loading the Dataset
    To load the dataset, use the following code:
    ```python
    from datasets import load_dataset
    dataset = load_dataset("locuslab/TOFU", "full")
    ```
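
    The `full` config loads the complete set of QA pairs. The forget and retain splits are published as separate configs; the names below are assumed for the 10% forget setting, so verify them against [the dataset card](https://huggingface.co/datasets/locuslab/TOFU):
    ```python
    from datasets import load_dataset

    # Assumed config names for the 10% forget setting; see the dataset card.
    forget_set = load_dataset("locuslab/TOFU", "forget10")
    retain_set = load_dataset("locuslab/TOFU", "retain90")
    ```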
""") | |

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600)
scheduler.start()
demo.launch(debug=True)