sheonhan committed
Commit 85dbbc4
1 Parent(s): 614ee1f

display message after eval submission

Files changed (2)
  1. .gitignore +1 -0
  2. app.py +41 -15
.gitignore CHANGED
@@ -1,3 +1,4 @@
 evals/
 venv/
 __pycache__/
+.env
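The newly ignored .env file itself is not part of this commit, but it presumably holds local secrets such as the H4_TOKEN the app uses when uploading eval requests. A minimal sketch of that convention, assuming python-dotenv is used locally (the loading step is an assumption, not something shown in this diff; on a hosted Space the token would come from the Space's configured secrets):

```python
# Assumption: the ignored .env file holds local secrets such as H4_TOKEN.
import os

from dotenv import load_dotenv  # assumes python-dotenv is installed

load_dotenv()  # reads key=value pairs from .env into the environment, if the file exists
H4_TOKEN = os.environ.get("H4_TOKEN")
```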
app.py CHANGED
@@ -145,27 +145,37 @@ def is_model_on_hub(model_name, revision) -> bool:
 
 
 
-def add_new_eval(model:str, base_model : str, revision:str, is_8_bit_eval: bool, private:bool, is_delta_weight:bool):
+def add_new_eval(
+    model: str,
+    base_model: str,
+    revision: str,
+    is_8_bit_eval: bool,
+    private: bool,
+    is_delta_weight: bool,
+):
     # check the model actually exists before adding the eval
     if revision == "":
         revision = "main"
     if is_delta_weight and not is_model_on_hub(base_model, revision):
-        print(base_model, "base model not found on hub")
-        return
+        error_message = f"Base model \"{base_model}\" was not found on hub!"
+        print(error_message)
+        return f"<p style='color: red; font-size: 18px; text-align: center;'>{error_message}</p>"
 
     if not is_model_on_hub(model, revision):
-        print(model, "not found on hub")
-        return
+        error_message = f"Model \"{model}\"was not found on hub!"
+        print(error_message)
+        return f"<p style='color: red; font-size: 18px; text-align: center;'>{error_message}</p>"
+
     print("adding new eval")
 
     eval_entry = {
-        "model" : model,
-        "base_model" : base_model,
-        "revision" : revision,
-        "private" : private,
-        "8bit_eval" : is_8_bit_eval,
-        "is_delta_weight" : is_delta_weight,
-        "status" : "PENDING"
+        "model": model,
+        "base_model": base_model,
+        "revision": revision,
+        "private": private,
+        "8bit_eval": is_8_bit_eval,
+        "is_delta_weight": is_delta_weight,
+        "status": "PENDING",
     }
 
     user_name = ""
@@ -174,7 +184,7 @@ def add_new_eval(model:str, base_model : str, revision:str, is_8_bit_eval: bool,
         user_name = model.split("/")[0]
         model_path = model.split("/")[1]
 
-    OUT_DIR=f"eval_requests/{user_name}"
+    OUT_DIR = f"eval_requests/{user_name}"
     os.makedirs(OUT_DIR, exist_ok=True)
     out_path = f"{OUT_DIR}/{model_path}_eval_request_{private}_{is_8_bit_eval}_{is_delta_weight}.json"
 
@@ -190,6 +200,9 @@ def add_new_eval(model:str, base_model : str, revision:str, is_8_bit_eval: bool,
         token=H4_TOKEN,
         repo_type="dataset",
     )
+
+    success_message = "Your request has been submitted to the evaluation queue!"
+    return f"<p style='color: green; font-size: 18px; text-align: center;'>{success_message}</p>"
 
 
 def refresh():
@@ -247,8 +260,21 @@ We chose these benchmarks as they test a variety of reasoning and general knowle
 
     with gr.Row():
         submit_button = gr.Button("Submit Eval")
-        submit_button.click(add_new_eval, [model_name_textbox, base_model_name_textbox, revision_name_textbox, is_8bit_toggle, private, is_delta_weight])
+    with gr.Row():
+        submission_result = gr.Markdown()
+    submit_button.click(
+        add_new_eval,
+        [
+            model_name_textbox,
+            base_model_name_textbox,
+            revision_name_textbox,
+            is_8bit_toggle,
+            private,
+            is_delta_weight,
+        ],
+        submission_result
+    )
 
 
     block.load(refresh, inputs=[], outputs=[leaderboard_table, eval_table])
-block.launch()
+block.launch()
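Taken together, the app.py changes make add_new_eval return an HTML string and wire it into a new gr.Markdown output, so the user sees a red error or green success banner after clicking Submit Eval instead of the message only being printed to the server log. A minimal, self-contained sketch of that pattern; the fake_submit handler, model_name textbox, and demo block are hypothetical stand-ins for the Space's add_new_eval, its input widgets, and block:

```python
import gradio as gr

def fake_submit(model: str):
    # Stand-in for add_new_eval: validate, then return an HTML status string.
    if not model:
        msg = "Model name is required!"
        return f"<p style='color: red; font-size: 18px; text-align: center;'>{msg}</p>"
    msg = "Your request has been submitted to the evaluation queue!"
    return f"<p style='color: green; font-size: 18px; text-align: center;'>{msg}</p>"

with gr.Blocks() as demo:
    model_name = gr.Textbox(label="Model name")
    with gr.Row():
        submit_button = gr.Button("Submit Eval")
    with gr.Row():
        submission_result = gr.Markdown()  # renders the returned HTML as a banner
    submit_button.click(fake_submit, [model_name], submission_result)

demo.launch()
```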