haoyang committed on
Commit
48dac78
β€’
1 Parent(s): 068538c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +86 -86
app.py CHANGED
@@ -239,92 +239,92 @@ with demo:
239
  with gr.TabItem("πŸ“ About", elem_id="llm-benchmark-tab-table", id=3):
240
  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
241
 
242
- with gr.TabItem("πŸš€ Submit here! ", elem_id="llm-benchmark-tab-table", id=4):
243
- with gr.Column():
244
- with gr.Row():
245
- gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
246
-
247
- with gr.Column():
248
- with gr.Accordion(
249
- f"βœ… Finished Evaluations ({len(finished_eval_queue_df)})",
250
- open=False,
251
- ):
252
- with gr.Row():
253
- finished_eval_table = gr.components.Dataframe(
254
- value=finished_eval_queue_df,
255
- headers=EVAL_COLS,
256
- datatype=EVAL_TYPES,
257
- row_count=5,
258
- )
259
- with gr.Accordion(
260
- f"πŸ”„ Running Evaluation Queue ({len(running_eval_queue_df)})",
261
- open=False,
262
- ):
263
- with gr.Row():
264
- running_eval_table = gr.components.Dataframe(
265
- value=running_eval_queue_df,
266
- headers=EVAL_COLS,
267
- datatype=EVAL_TYPES,
268
- row_count=5,
269
- )
270
-
271
- with gr.Accordion(
272
- f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
273
- open=False,
274
- ):
275
- with gr.Row():
276
- pending_eval_table = gr.components.Dataframe(
277
- value=pending_eval_queue_df,
278
- headers=EVAL_COLS,
279
- datatype=EVAL_TYPES,
280
- row_count=5,
281
- )
282
- with gr.Row():
283
- gr.Markdown("# βœ‰οΈβœ¨ Submit your model here!", elem_classes="markdown-text")
284
-
285
- with gr.Row():
286
- with gr.Column():
287
- model_name_textbox = gr.Textbox(label="Model name")
288
- revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
289
- model_type = gr.Dropdown(
290
- choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
291
- label="Model type",
292
- multiselect=False,
293
- value=None,
294
- interactive=True,
295
- )
296
-
297
- with gr.Column():
298
- precision = gr.Dropdown(
299
- choices=[i.value.name for i in Precision if i != Precision.Unknown],
300
- label="Precision",
301
- multiselect=False,
302
- value="float16",
303
- interactive=True,
304
- )
305
- weight_type = gr.Dropdown(
306
- choices=[i.value.name for i in WeightType],
307
- label="Weights type",
308
- multiselect=False,
309
- value="Original",
310
- interactive=True,
311
- )
312
- base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
313
-
314
- submit_button = gr.Button("Submit Eval")
315
- submission_result = gr.Markdown()
316
- submit_button.click(
317
- add_new_eval,
318
- [
319
- model_name_textbox,
320
- base_model_name_textbox,
321
- revision_name_textbox,
322
- precision,
323
- weight_type,
324
- model_type,
325
- ],
326
- submission_result,
327
- )
328
 
329
  with gr.Row():
330
  with gr.Accordion("πŸ“™ Comment", open=False):
 
239
  with gr.TabItem("πŸ“ About", elem_id="llm-benchmark-tab-table", id=3):
240
  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
241
 
242
+ # with gr.TabItem("πŸš€ Submit here! ", elem_id="llm-benchmark-tab-table", id=4):
243
+ # with gr.Column():
244
+ # with gr.Row():
245
+ # gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
246
+
247
+ # with gr.Column():
248
+ # with gr.Accordion(
249
+ # f"βœ… Finished Evaluations ({len(finished_eval_queue_df)})",
250
+ # open=False,
251
+ # ):
252
+ # with gr.Row():
253
+ # finished_eval_table = gr.components.Dataframe(
254
+ # value=finished_eval_queue_df,
255
+ # headers=EVAL_COLS,
256
+ # datatype=EVAL_TYPES,
257
+ # row_count=5,
258
+ # )
259
+ # with gr.Accordion(
260
+ # f"πŸ”„ Running Evaluation Queue ({len(running_eval_queue_df)})",
261
+ # open=False,
262
+ # ):
263
+ # with gr.Row():
264
+ # running_eval_table = gr.components.Dataframe(
265
+ # value=running_eval_queue_df,
266
+ # headers=EVAL_COLS,
267
+ # datatype=EVAL_TYPES,
268
+ # row_count=5,
269
+ # )
270
+
271
+ # with gr.Accordion(
272
+ # f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
273
+ # open=False,
274
+ # ):
275
+ # with gr.Row():
276
+ # pending_eval_table = gr.components.Dataframe(
277
+ # value=pending_eval_queue_df,
278
+ # headers=EVAL_COLS,
279
+ # datatype=EVAL_TYPES,
280
+ # row_count=5,
281
+ # )
282
+ # with gr.Row():
283
+ # gr.Markdown("# βœ‰οΈβœ¨ Submit your model here!", elem_classes="markdown-text")
284
+
285
+ # with gr.Row():
286
+ # with gr.Column():
287
+ # model_name_textbox = gr.Textbox(label="Model name")
288
+ # revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
289
+ # model_type = gr.Dropdown(
290
+ # choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
291
+ # label="Model type",
292
+ # multiselect=False,
293
+ # value=None,
294
+ # interactive=True,
295
+ # )
296
+
297
+ # with gr.Column():
298
+ # precision = gr.Dropdown(
299
+ # choices=[i.value.name for i in Precision if i != Precision.Unknown],
300
+ # label="Precision",
301
+ # multiselect=False,
302
+ # value="float16",
303
+ # interactive=True,
304
+ # )
305
+ # weight_type = gr.Dropdown(
306
+ # choices=[i.value.name for i in WeightType],
307
+ # label="Weights type",
308
+ # multiselect=False,
309
+ # value="Original",
310
+ # interactive=True,
311
+ # )
312
+ # base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
313
+
314
+ # submit_button = gr.Button("Submit Eval")
315
+ # submission_result = gr.Markdown()
316
+ # submit_button.click(
317
+ # add_new_eval,
318
+ # [
319
+ # model_name_textbox,
320
+ # base_model_name_textbox,
321
+ # revision_name_textbox,
322
+ # precision,
323
+ # weight_type,
324
+ # model_type,
325
+ # ],
326
+ # submission_result,
327
+ # )
328
 
329
  with gr.Row():
330
  with gr.Accordion("πŸ“™ Comment", open=False):