import gradio as gr
import pandas as pd
import os
from apscheduler.schedulers.background import BackgroundScheduler
from huggingface_hub import HfApi
from uploads import add_new_eval

CITATION_BUTTON_LABEL = "Copy the following snippet to cite these results"
CITATION_BUTTON_TEXT = r"""@misc{tofu2024,
      title={TOFU: A Task of Fictitious Unlearning for LLMs},
      author={Pratyush Maini and Zhili Feng and Avi Schwarzschild and Zachary Lipton and Zico Kolter},
      year={2024},
      eprint={2401.06121},
      archivePrefix={arXiv},
      primaryClass={cs.LG}
}"""

api = HfApi()
TOKEN = os.environ.get("TOKEN", None)
LEADERBOARD_PATH = "boyiwei/CoTaEval_leaderboard"

def restart_space():
    api.restart_space(repo_id=LEADERBOARD_PATH, token=TOKEN)


# Load the leaderboard CSV for a given (model, dataset, setting, criteria) combination
def baseline_load_data(model, dataset, setting, criteria):
    file_path = f'versions/{model}_{dataset}_{setting}_{criteria}.csv'
    df = pd.read_csv(file_path)

    # Keep only the reported columns, in a fixed display order
    if dataset == 'news':
        column_names = ["model_name", "method", "rouge1", "rougeL", "semantic_sim", "LCS(character)", "LCS(word)", "ACS(word)", "Levenshtein Distance", "Minhash Similarity",
                        "MMLU", "MT-Bench", "Blocklisted F1", "In-Domain F1", "Efficiency"]
    elif dataset == 'books':
        column_names = ["model_name", "method", "bleu", "rouge1", "rougeL", "semantic_sim", "LCS(character)", "LCS(word)", "ACS(word)", "Levenshtein Distance", "Minhash Similarity",
                        "MMLU", "MT-Bench", "Blocklisted rougeL", "In-Domain rougeL", "Efficiency"]
    else:
        raise ValueError(f"Unknown dataset: {dataset}")
    df = df[column_names]

    return df
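
# For example, baseline_load_data("llama2-7b-chat-hf", "news", "rag", "mean")
# reads versions/llama2-7b-chat-hf_news_rag_mean.csv and returns only the
# columns listed above, in that order.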

def update_dropdowns(model, dataset, setting, criteria):
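    """Keep the selector dropdowns mutually consistent.

    Encodes the constraints below: the "memorization" setting is only
    available with the "news" dataset and the "llama2-7b-chat-hf-newsqa"
    model, while the "books" dataset and all other models are restricted
    to the "rag" setting.
    """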
    updates = {
        "setting": gr.update(interactive=True),
        "dataset": gr.update(interactive=True),
        "model": gr.update(interactive=True),
        "criteria": gr.update(interactive=True),
    }
    
    if setting == "memorization":
        updates["dataset"] = gr.update(value="news", interactive=False)
        updates["model"] = gr.update(value="llama2-7b-chat-hf-newsqa", interactive=False)
    elif dataset == "books":
        updates["setting"] = gr.update(value="rag", interactive=False)
        if model == "llama2-7b-chat-hf-newsqa":
            updates["model"] = gr.update(value="llama2-7b-chat-hf", interactive=True)
    elif model == "llama2-7b-chat-hf-newsqa":
        updates["setting"] = gr.update(value="memorization", interactive=False)
        updates["dataset"] = gr.update(value="news", interactive=False)
    elif model != "llama2-7b-chat-hf-newsqa":
        updates["setting"] = gr.update(value="rag", interactive=False)
    
    return updates["model"], updates["dataset"], updates["setting"], updates["criteria"]


def load_data(model, dataset, setting, criteria):
    baseline_df = baseline_load_data(model, dataset, setting, criteria)
    return baseline_df

# Search the leaderboard by the contents of the "method" column
def search_leaderboard(df, query):
    if query == "":
        return df
    else:
        return df[df['method'].str.contains(query)]
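
# For example, search_leaderboard(df, "prompt") keeps only the rows whose
# "method" value contains the query string "prompt" (a hypothetical method name).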

# Reload the leaderboard table when any selector changes
def change_version(model, dataset, setting, criteria):
    new_df = load_data(model, dataset, setting, criteria)
    return new_df


# Initialize Gradio app
demo = gr.Blocks()

with demo:
    gr.Markdown("""
    ## 🥇 CoTaEval Leaderboard
    The TOFU dataset is a benchmark designed to evaluate the unlearning performance of large language models in realistic scenarios. It consists of question-answer pairs based on the autobiographies of 200 fictitious authors, generated entirely by GPT-4. The objective of the task is to effectively unlearn a fine-tuned model using different portions of the forget set.
    Read more at [https://locuslab.github.io/tofu/](https://locuslab.github.io/tofu/).
    """)

    with gr.Row():
        with gr.Accordion("📙 Citation", open=False):
            citation_button = gr.Textbox(
                value=CITATION_BUTTON_TEXT,
                label=CITATION_BUTTON_LABEL,
                elem_id="citation-button",
                show_copy_button=True,
            )

    with gr.Tabs():
        with gr.TabItem("Leaderboard"):
            with gr.Row():
                setting_dropdown = gr.Dropdown(
                    choices=["rag", "memorization"],
                    label="🔄 Select Setting",
                    value="rag",
                )
                dataset_dropdown = gr.Dropdown(
                    choices=["news", "books"],
                    label="🔄 Select Dataset",
                    value="news",
                )
                model_dropdown = gr.Dropdown(
                    choices=["llama2-7b-chat-hf", "llama2-70b-chat-hf", "dbrx-instruct", "llama2-7b-chat-hf-newsqa"],
                    label="🔄 Select Model",
                    value="llama2-7b-chat-hf",
                )
                criteria_dropdown = gr.Dropdown(
                    choices=["mean", "max"],
                    label="🔄 Select Criteria",
                    value="mean",
                )

            leaderboard_table = gr.components.Dataframe(
                value=load_data("llama2-7b-chat-hf", "news", "rag", "mean"),
                interactive=True,
                visible=True,
            )
            
            
            # setting_dropdown.change(
            #     update_dropdowns,
            #     inputs=[model_dropdown, dataset_dropdown, setting_dropdown, criteria_dropdown],
            #     outputs=[model_dropdown, dataset_dropdown, setting_dropdown, criteria_dropdown]
            # )
            
            # dataset_dropdown.change(
            #     update_dropdowns,
            #     inputs=[model_dropdown, dataset_dropdown, setting_dropdown, criteria_dropdown],
            #     outputs=[model_dropdown, dataset_dropdown, setting_dropdown, criteria_dropdown]
            # )

            # model_dropdown.change(
            #     update_dropdowns,
            #     inputs=[model_dropdown, dataset_dropdown, setting_dropdown, criteria_dropdown],
            #     outputs=[model_dropdown, dataset_dropdown, setting_dropdown, criteria_dropdown]
            # )
            
            # Reload the table whenever any of the four selectors changes
            for dropdown in (setting_dropdown, dataset_dropdown, model_dropdown, criteria_dropdown):
                dropdown.change(
                    change_version,
                    inputs=[model_dropdown, dataset_dropdown, setting_dropdown, criteria_dropdown],
                    outputs=leaderboard_table,
                )
    
    with gr.Accordion("Submit a new model for evaluation"):
        with gr.Row():
            with gr.Column():
                method_name_textbox = gr.Textbox(label="Method name")
                model_family_radio = gr.Radio(["llama", "phi"], value="llama", label="Model family")
                forget_rate_radio = gr.Radio(["1%", "5%", "10%"], value="10%", label="Forget rate")
                url_textbox = gr.Textbox(label="URL to model information")
            with gr.Column():
                organisation = gr.Textbox(label="Organisation")
                mail = gr.Textbox(label="Contact email")
                file_output = gr.File()

        submit_button = gr.Button("Submit Eval")
        submission_result = gr.Markdown()
        submit_button.click(
            add_new_eval,
            [
                method_name_textbox,
                model_family_radio,
                forget_rate_radio,
                url_textbox,
                file_output,
                organisation,
                mail
            ],
            submission_result,
        )

    gr.Markdown("""
    ## Quick Links

    - [**Website**](https://locuslab.github.io/tofu): The landing page for TOFU
    - [**arXiv Paper**](http://arxiv.org/abs/2401.06121): Detailed information about the TOFU dataset and its significance in unlearning tasks.
    - [**GitHub Repository**](https://github.com/locuslab/tofu): Access the source code, fine-tuning scripts, and additional resources for the TOFU dataset.
    - [**Dataset on Hugging Face**](https://huggingface.co/datasets/locuslab/TOFU): Direct link to download the TOFU dataset.
    - [**Leaderboard on Hugging Face Spaces**](https://huggingface.co/spaces/locuslab/tofu_leaderboard): Current rankings and submissions for the TOFU dataset challenges.
    - [**Summary on Twitter**](https://x.com/_akhaliq/status/1745643293839327268): A concise summary and key takeaways from the project.

    ## Applicability 🚀

    The dataset is in QA format, making it ideal for popular chat models such as Llama2, Mistral, or Qwen, though it works with any other large language model as well. The accompanying code base is written for Llama2 but can easily be adapted to other models.
    
    ## Installation
    
    ```
    conda create -n tofu python=3.10
    conda activate tofu
    conda install pytorch pytorch-cuda=11.8 -c pytorch -c nvidia
    conda install -c "nvidia/label/cuda-11.8.0" cuda-toolkit
    pip install -r requirements.txt
    ```
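
    As a quick sanity check (a minimal sketch, not part of the original instructions), the following confirms that PyTorch can see the GPU after the CUDA 11.8 install:

    ```python
    import torch

    # Should print True on a machine with a working CUDA 11.8 setup
    print(torch.cuda.is_available())
    ```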
    
    ## Loading the Dataset
    
    To load the dataset, use the following code:
    
    ```python
    from datasets import load_dataset
    dataset = load_dataset("locuslab/TOFU", "full")
    ```
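
    The dataset also provides splits for the unlearning task itself. As a sketch, assuming the configuration names listed on the dataset page (e.g. `forget10` for the 10% forget rate used in submissions):

    ```python
    from datasets import load_dataset

    # "forget10" is assumed to correspond to the 10% forget rate
    forget_set = load_dataset("locuslab/TOFU", "forget10")
    ```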


    """)

scheduler = BackgroundScheduler()
scheduler.add_job(restart_space, "interval", seconds=3600)
scheduler.start()
demo.launch(debug=True)