boyiwei committed
Commit 30c2633 • 1 Parent(s): 81618ab
Files changed (2)
  1. app.py +62 -34
  2. uploads.py +1 -1
app.py CHANGED
@@ -58,6 +58,29 @@ def baseline_load_data(model,version,metrics):
 
     return df
 
+def update_dropdowns(setting, dataset, model):
+    updates = {
+        "setting": gr.update(interactive=True),
+        "dataset": gr.update(interactive=True),
+        "model": gr.update(interactive=True),
+    }
+
+    if setting == "memorization":
+        updates["dataset"] = gr.update(value="news", interactive=False)
+        updates["model"] = gr.update(value="llama2-7B-chat_newsqa", interactive=False)
+    elif dataset == "books":
+        updates["setting"] = gr.update(value="RAG", interactive=False)
+        if model == "llama2-7B-chat_newsqa":
+            updates["model"] = gr.update(value="llama2-7B-chat-hf", interactive=True)
+    elif model == "llama2-7B-chat_newsqa":
+        updates["setting"] = gr.update(value="memorization", interactive=False)
+        updates["dataset"] = gr.update(value="news", interactive=False)
+    elif model != "llama2-7B-chat_newsqa":
+        updates["setting"] = gr.update(value="RAG", interactive=False)
+
+    return updates["setting"], updates["dataset"], updates["model"]
+
+
 def load_data(model, version, metrics):
     baseline_df = baseline_load_data(model, version, metrics)
     # now for every file in "versions/{model}-{version}/*.csv"
@@ -109,57 +132,62 @@ with demo:
     with gr.Tabs():
         with gr.TabItem("Leaderboard"):
             with gr.Row():
-                version_dropdown = gr.Dropdown(
-                    choices=["1%", "5%", "10%"],
-                    label="🔄 Select Forget Percentage",
-                    value="10%",
+                setting_dropdown = gr.Dropdown(
+                    choices = ["RAG", "memorization"],
+                    label="🔄 Select Setting",
+                    value="RAG",
                 )
-                model_dropdown = gr.Dropdown(
-                    choices=["llama", "phi"],
-                    label="🔄 Select Base Model",
-                    value="llama",
+                dataset_dropdown = gr.Dropdown(
+                    choices = ['news', 'books'],
+                    label="🔄 Select Dataset",
+                    value="news",
                 )
-            with gr.Row():
-                metrics_checkbox = gr.CheckboxGroup(
-                    label="Select Metrics",
-                    choices=["ROUGE", "Truth Ratio", "Prob."],
-                    value = ["ROUGE", "Truth Ratio", "Prob."],
-                )
-
-            with gr.Row():
-                search_bar = gr.Textbox(
-                    placeholder="Search for methods...",
-                    show_label=False,
+                model_dropdown = gr.Dropdown(
+                    choices=["llama2-7B-chat-hf", "llama2-70B-chat-hf", "dbrx-instruct", "llama2-7B-chat_newsqa"],
+                    label="🔄 Select Model",
+                    value="llama2-7B-chat-hf",
                 )
 
             leaderboard_table = gr.components.Dataframe(
-                value=load_data("llama", "10%", ["ROUGE", "Truth Ratio", "Prob."]),
+                value=load_data("RAG", "news", "llama2-7B-chat-hf"),
                 interactive=True,
                 visible=True,
             )
-
-            version_dropdown.change(
+
+            setting_dropdown.change(
                 change_version,
-                inputs=[model_dropdown,version_dropdown,metrics_checkbox],
+                inputs=[dataset_dropdown,model_dropdown],
                 outputs=leaderboard_table
             )
-
-            model_dropdown.change(
+
+            dataset_dropdown.change(
                 change_version,
-                inputs=[model_dropdown,version_dropdown,metrics_checkbox],
+                inputs=[setting_dropdown, model_dropdown],
                 outputs=leaderboard_table
            )
-
-            search_bar.change(
-                search_leaderboard,
-                inputs=[leaderboard_table, search_bar,metrics_checkbox],
+
+            model_dropdown.change(
+                change_version,
+                inputs=[setting_dropdown, dataset_dropdown],
                 outputs=leaderboard_table
             )
+
+            setting_dropdown.change(
+                update_dropdowns,
+                inputs=[setting_dropdown, dataset_dropdown, model_dropdown],
+                outputs=[setting_dropdown, dataset_dropdown, model_dropdown]
+            )
+
+            dataset_dropdown.change(
+                update_dropdowns,
+                inputs=[setting_dropdown, dataset_dropdown, model_dropdown],
+                outputs=[setting_dropdown, dataset_dropdown, model_dropdown]
+            )
 
-            metrics_checkbox.change(
-                change_version,
-                inputs=[model_dropdown,version_dropdown,metrics_checkbox],
-                outputs=leaderboard_table
+            model_dropdown.change(
+                update_dropdowns,
+                inputs=[setting_dropdown, dataset_dropdown, model_dropdown],
+                outputs=[setting_dropdown, dataset_dropdown, model_dropdown]
             )
 
     with gr.Accordion("Submit a new model for evaluation"):
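
Note on the dropdown wiring added above: each of the three dropdowns now feeds both change_version (to reload the leaderboard table) and update_dropdowns (to keep the selections mutually consistent), so choosing the memorization setting pins the dataset to news and the model to llama2-7B-chat_newsqa, while choosing the books dataset pins the setting to RAG. Below is a minimal, self-contained sketch of that linking pattern, not code from this commit; it assumes a Gradio version in which gr.update() returned from an event handler patches component properties, and it leaves out the leaderboard table and the change_version handler.

import gradio as gr

def link_dropdowns(setting, dataset, model):
    # One gr.update() per dropdown; by default every dropdown stays selectable.
    setting_upd = gr.update(interactive=True)
    dataset_upd = gr.update(interactive=True)
    model_upd = gr.update(interactive=True)
    if setting == "memorization":
        # Mirrors the commit: memorization is tied to the news dataset and newsqa model.
        dataset_upd = gr.update(value="news", interactive=False)
        model_upd = gr.update(value="llama2-7B-chat_newsqa", interactive=False)
    elif dataset == "books":
        # Mirrors the commit: the books dataset is RAG-only.
        setting_upd = gr.update(value="RAG", interactive=False)
    return setting_upd, dataset_upd, model_upd

with gr.Blocks() as demo:
    setting = gr.Dropdown(["RAG", "memorization"], value="RAG", label="Setting")
    dataset = gr.Dropdown(["news", "books"], value="news", label="Dataset")
    model = gr.Dropdown(
        ["llama2-7B-chat-hf", "llama2-7B-chat_newsqa"],
        value="llama2-7B-chat-hf",
        label="Model",
    )
    # Every dropdown triggers the same constraint function and patches all three.
    for dd in (setting, dataset, model):
        dd.change(link_dropdowns, inputs=[setting, dataset, model],
                  outputs=[setting, dataset, model])

demo.launch()

Because each handler reads and writes the same three dropdowns, keeping the constraint function idempotent means any follow-on change events settle on a stable selection instead of cycling.
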
uploads.py CHANGED
@@ -5,7 +5,7 @@ import datetime
 import pandas as pd
 
 
-RESULTS_PATH = "locuslab/CoTaEval_leaderboard"
+RESULTS_PATH = "boyiwei/CoTaEval_leaderboard"
 api = HfApi()
 TOKEN = os.environ.get("TOKEN", None)
 YEAR_VERSION = "2024"
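
The uploads.py change only repoints RESULTS_PATH from the locuslab namespace to boyiwei's namespace; the HfApi handle, TOKEN, and YEAR_VERSION are untouched. As a rough illustration (not part of the diff), a submission upload against that constant would typically go through HfApi.upload_file; the helper name, destination path, and repo_type below are assumptions.

import os
from huggingface_hub import HfApi

RESULTS_PATH = "boyiwei/CoTaEval_leaderboard"
api = HfApi()
TOKEN = os.environ.get("TOKEN", None)

def upload_submission(local_csv: str, dest_name: str) -> None:
    # Hypothetical helper: push a results CSV into the leaderboard repo.
    # repo_type="space" is an assumption; use "dataset" if results live in a dataset repo.
    api.upload_file(
        path_or_fileobj=local_csv,
        path_in_repo=f"submissions/{dest_name}",
        repo_id=RESULTS_PATH,
        repo_type="space",
        token=TOKEN,
    )
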