ftshijt committed
Commit 9e9bfc4
Parent(s): 3870f69

Update app.py

Files changed (1):
  app.py  +85 -85
app.py CHANGED
@@ -245,92 +245,92 @@ with demo:
         with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
             gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
-        with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
-            with gr.Column():
-                with gr.Row():
-                    gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
-
-                with gr.Column():
-                    with gr.Accordion(
-                        f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            finished_eval_table = gr.components.Dataframe(
-                                value=finished_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
+        # with gr.TabItem("🚀 Submit here! ", elem_id="llm-benchmark-tab-table", id=3):
+        #     with gr.Column():
+        #         with gr.Row():
+        #             gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
+
+        #         with gr.Column():
+        #             with gr.Accordion(
+        #                 f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     finished_eval_table = gr.components.Dataframe(
+        #                         value=finished_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+        #             with gr.Accordion(
+        #                 f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     running_eval_table = gr.components.Dataframe(
+        #                         value=running_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
+        #                     )
+
+        #             with gr.Accordion(
+        #                 f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+        #                 open=False,
+        #             ):
+        #                 with gr.Row():
+        #                     pending_eval_table = gr.components.Dataframe(
+        #                         value=pending_eval_queue_df,
+        #                         headers=EVAL_COLS,
+        #                         datatype=EVAL_TYPES,
+        #                         row_count=5,
                             )
-                    with gr.Accordion(
-                        f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            running_eval_table = gr.components.Dataframe(
-                                value=running_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-
-                    with gr.Accordion(
-                        f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                        open=False,
-                    ):
-                        with gr.Row():
-                            pending_eval_table = gr.components.Dataframe(
-                                value=pending_eval_queue_df,
-                                headers=EVAL_COLS,
-                                datatype=EVAL_TYPES,
-                                row_count=5,
-                            )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
+        # with gr.Row():
+        #     gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+        # with gr.Row():
+        #     with gr.Column():
+        #         model_name_textbox = gr.Textbox(label="Model name")
+        #         revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+        #         model_type = gr.Dropdown(
+        #             choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+        #             label="Model type",
+        #             multiselect=False,
+        #             value=None,
+        #             interactive=True,
+        #         )
+
+        #     with gr.Column():
+        #         precision = gr.Dropdown(
+        #             choices=[i.value.name for i in Precision if i != Precision.Unknown],
+        #             label="Precision",
+        #             multiselect=False,
+        #             value="float16",
+        #             interactive=True,
+        #         )
+        #         weight_type = gr.Dropdown(
+        #             choices=[i.value.name for i in WeightType],
+        #             label="Weights type",
+        #             multiselect=False,
+        #             value="Original",
+        #             interactive=True,
+        #         )
+        #         base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+        # submit_button = gr.Button("Submit Eval")
+        # submission_result = gr.Markdown()
+        # submit_button.click(
+        #     add_new_eval,
+        #     [
+        #         model_name_textbox,
+        #         base_model_name_textbox,
+        #         revision_name_textbox,
+        #         precision,
+        #         weight_type,
+        #         model_type,
+        #     ],
+        #     submission_result,
+        # )
 
     with gr.Row():
         with gr.Accordion("📙 Citation", open=False):