JaydenCool committed on
Commit
c1bc2bf
•
1 Parent(s): d7041cd
Files changed (1)
  1. app.py +2 -197
app.py CHANGED
@@ -61,13 +61,7 @@ def get_dataset_csv(
 ):
     df = ORIGINAL_DF[ORIGINAL_DF['Size'].isin(model_size)]
     df = df.drop(columns="Size")
-
-    # if metric_choice != "None":
-    # metric_choice = metric_choice + "/std"
-    # sort_basis = df[metric_choice].apply(format_csv_numbers)
-    # sorted_indices = sort_basis.argsort()[::-1]
-    # df = df.iloc[sorted_indices]
-
+
     leaderboard_table = gr.components.Dataframe(
         value=df,
         interactive=False,
@@ -81,12 +75,6 @@ def get_dataset_csv_per(
     df = ORIGINAL_DF_PER[ORIGINAL_DF_PER['Size'].isin(model_size)]
     df = df.drop(columns="Size")
 
-    # if metric_choice != "None":
-    # metric_choice = metric_choice + "/std"
-    # sort_basis = df[metric_choice].apply(format_csv_numbers)
-    # sorted_indices = sort_basis.argsort()[::-1]
-    # df = df.iloc[sorted_indices]
-
     leaderboard_table = gr.components.Dataframe(
         value=df,
         interactive=False,
@@ -106,16 +94,6 @@ def get_dataset_csv_sub_gen(
     subclass_choice_label = ["Model", subclass_choice+"_Accuracy", subclass_choice+"_Precision", subclass_choice+"_Recall"]
     df = df[subclass_choice_label]
 
-    # if metric_choice != "None":
-    # # metric_choice = metric_choice + "/std"
-    # metric_choice = metric_choice.split("_")[0]
-    # metric_choice = subclass_choice + "_" + metric_choice
-    # # sort_basis = df[metric_choice].apply(format_csv_numbers)
-    # sort_basis = df[metric_choice]
-
-    # sorted_indices = sort_basis.argsort()[::-1]
-    # df = df.iloc[sorted_indices]
-
     leaderboard_table = gr.components.Dataframe(
         value=df,
         interactive=False,
@@ -135,16 +113,6 @@ def get_dataset_csv_sub_per(
     subclass_choice_label = ["Model", subclass_choice+"_Accuracy", subclass_choice+"_Precision", subclass_choice+"_Recall"]
     df = df[subclass_choice_label]
 
-    # if metric_choice != "None":
-    # # metric_choice = metric_choice + "/std"
-    # metric_choice = metric_choice.split("_")[0]
-    # metric_choice = subclass_choice + "_" + metric_choice
-    # # sort_basis = df[metric_choice].apply(format_csv_numbers)
-    # sort_basis = df[metric_choice]
-
-    # sorted_indices = sort_basis.argsort()[::-1]
-    # df = df.iloc[sorted_indices]
-
     leaderboard_table = gr.components.Dataframe(
         value=df,
         interactive=False,
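
Note for readers following the change: after this commit, all four CSV helpers above reduce to the same filter-then-display pattern that survives in the context lines, with the commented-out metric-sorting code removed. A minimal sketch of that pattern follows; the CSV path, the helper's exact signature, and the module-level loading are assumptions for illustration, while the filter, column drop, and Dataframe construction are taken directly from the diff.

import gradio as gr
import pandas as pd

# Hypothetical stand-in for the leaderboard CSV that the real app.py loads at startup.
ORIGINAL_DF = pd.read_csv("leaderboard.csv")

def get_dataset_csv(model_size):
    # Keep only rows whose 'Size' is among the checked sizes, then hide that column.
    df = ORIGINAL_DF[ORIGINAL_DF["Size"].isin(model_size)]
    df = df.drop(columns="Size")

    # Render as a read-only table, as in the retained context lines.
    leaderboard_table = gr.components.Dataframe(
        value=df,
        interactive=False,
    )
    return leaderboard_table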
@@ -197,14 +165,6 @@ with gr.Blocks() as demo:
                 info="Please choose the type to display.",
             )
 
-        # with gr.Column(scale=0.8):
-        # metric_choice = gr.Dropdown(
-        # choices=METRICS,
-        # value="None",
-        # label="Metric",
-        # info="Please choose the metric to display.",
-        # )
-
         with gr.Column(scale=10):
             model_choice = gr.CheckboxGroup(
                 choices=CLASSIFICATION["model_size"],
@@ -213,40 +173,8 @@ with gr.Blocks() as demo:
                 info="Please choose the model size to display.",
             )
 
-
-        # with gr.Column(scale=0.8):
-        # subclass_choice = gr.Dropdown(
-        # choices=SUBCLASS,
-        # value="Discrimination",
-        # label="Subclass",
-        # info="Please choose the subclass to display.",
-        # )
-
-
-    #👉 this part is for csv table generatived
+    #👉 this part is for csv table generatived
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
-
-        # with gr.TabItem("🏅 Overall Generatived", elem_id="od-benchmark-tab-table", id=1):
-        # dataframe = gr.components.Dataframe(
-        # elem_id="leaderboard-table",
-        # )
-        # #👉 this part is for csv table perplexity
-        # with gr.TabItem("🏅 Overall Perplexity", elem_id="od-benchmark-tab-table", id=2):
-        # datafram_per = gr.components.Dataframe(
-        # elem_id="leaderboard-table",
-        # )
-
-        # #👉 this part is for csv subclass table generatived
-        # with gr.TabItem("🏅 Subclass Generatived", elem_id="od-benchmark-tab-table", id=3):
-        # dataframe_sub_gen = gr.components.Dataframe(
-        # elem_id="leaderboard-table",
-        # )
-
-        # #👉 this part is for csv subclass table perplexity
-        # with gr.TabItem("🏅 Subclass Perplexity", elem_id="od-benchmark-tab-table", id=4):
-        # dataframe_sub_per = gr.components.Dataframe(
-        # elem_id="leaderboard-table",
-        # )
         # ----------------- modify text -----------------
 
         with gr.TabItem("🏅 Generation", elem_id="od-benchmark-tab-table", id=6):
@@ -269,105 +197,6 @@ with gr.Blocks() as demo:
 
     gr.Markdown(f"Last updated on **{_LAST_UPDATED}**", elem_classes="markdown-text")
 
-    # 👉 this part is for citation
-    # with gr.Row():
-    # with gr.Accordion("📙 Citation", open=False):
-    # gr.Textbox(
-    # value=_BIBTEX,
-    # lines=7,
-    # label="Copy the BibTeX snippet to cite this source",
-    # elem_id="citation-button",
-    # show_copy_button=True
-    # )
-
-    # this is result based on generative
-    # metric_choice.change(
-    # get_dataset_csv,
-    # inputs=[model_choice, metric_choice],
-    # outputs=dataframe,
-    # )
-
-    # model_choice.change(
-    # get_dataset_csv,
-    # inputs=[model_choice, metric_choice],
-    # outputs=dataframe,
-    # )
-
-    # demo.load(
-    # fn=get_dataset_csv,
-    # inputs=[model_choice, metric_choice],
-    # outputs=dataframe,
-    # )
-
-    # # this is result based on Perplexity
-    # metric_choice.change(
-    # get_dataset_csv_per,
-    # inputs=[model_choice, metric_choice],
-    # outputs=datafram_per,
-    # )
-
-    # model_choice.change(
-    # get_dataset_csv_per,
-    # inputs=[model_choice, metric_choice],
-    # outputs=datafram_per,
-    # )
-
-    # demo.load(
-    # fn=get_dataset_csv_per,
-    # inputs=[model_choice, metric_choice],
-    # outputs=datafram_per,
-    # )
-
-    # this is subclass result generatived
-    # metric_choice.change(
-    # get_dataset_csv_sub_gen,
-    # inputs=[model_choice, metric_choice, subclass_choice],
-    # outputs=dataframe_sub_gen,
-    # )
-
-    # model_choice.change(
-    # get_dataset_csv_sub_gen,
-    # inputs=[model_choice, metric_choice, subclass_choice],
-    # outputs=dataframe_sub_gen,
-    # )
-
-    # subclass_choice.change(
-    # get_dataset_csv_sub_gen,
-    # inputs=[model_choice, metric_choice, subclass_choice],
-    # outputs=dataframe_sub_gen,
-    # )
-
-    # demo.load(
-    # fn=get_dataset_csv_sub_gen,
-    # inputs=[model_choice, metric_choice, subclass_choice],
-    # outputs=dataframe_sub_gen,
-    # )
-
-    # # this is subclass result Perplexity
-    # # metric_choice.change(
-    # # get_dataset_csv_sub_per,
-    # # inputs=[model_choice, metric_choice, subclass_choice],
-    # # outputs=dataframe_sub_per,
-    # # )
-
-    # model_choice.change(
-    # get_dataset_csv_sub_per,
-    # inputs=[model_choice, metric_choice, subclass_choice],
-    # outputs=dataframe_sub_per,
-    # )
-
-    # subclass_choice.change(
-    # get_dataset_csv_sub_per,
-    # inputs=[model_choice, metric_choice, subclass_choice],
-    # outputs=dataframe_sub_per,
-    # )
-
-    # demo.load(
-    # fn=get_dataset_csv_sub_per,
-    # inputs=[model_choice, metric_choice, subclass_choice],
-    # outputs=dataframe_sub_per,
-    # )
-
     # --------------------------- all --------------------------------
     # this is all result Perplexity
 
@@ -382,18 +211,6 @@ with gr.Blocks() as demo:
         inputs=[model_choice, main_choice],
         outputs=dataframe_all_per,
     )
-
-    # metric_choice.change(
-    # get_dataset_classfier_per,
-    # inputs=[model_choice, main_choice],
-    # outputs=dataframe_all_per,
-    # )
-
-    # subclass_choice.change(
-    # get_dataset_classfier_per,
-    # inputs=[model_choice, metric_choice, main_choice],
-    # outputs=dataframe_all_per,
-    # )
 
     demo.load(
         fn=get_dataset_classfier_per,
@@ -414,18 +231,6 @@ with gr.Blocks() as demo:
         outputs=dataframe_all_gen,
     )
 
-    # metric_choice.change(
-    # get_dataset_classfier_gen,
-    # inputs=[model_choice, metric_choice, main_choice],
-    # outputs=dataframe_all_gen,
-    # )
-
-    # subclass_choice.change(
-    # get_dataset_classfier_gen,
-    # inputs=[model_choice, metric_choice, main_choice],
-    # outputs=dataframe_all_gen,
-    # )
-
     demo.load(
         fn=get_dataset_classfier_gen,
         inputs=[model_choice, main_choice],
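
The event wiring that survives the cleanup follows the plain Gradio change/load pattern visible in the context lines around the deleted blocks: the remaining controls re-render the two "all results" tables, and demo.load populates them on page load. Below is a self-contained sketch of that wiring with invented placeholder data; the component and helper names come from the diff context, while the choices, the placeholder DataFrame, and the Radio control standing in for main_choice are assumptions.

import gradio as gr
import pandas as pd

# Invented placeholder table standing in for the real leaderboard CSVs.
DEMO_DF = pd.DataFrame({"Model": ["A", "B"], "Size": ["7B", "13B"], "Accuracy": [0.71, 0.78]})

def get_dataset_classfier_per(model_size, main_type):
    # Same filter-then-display pattern as the helpers above; in the real app,
    # main_type would select which group of metric columns to show, and the
    # function builds a gr.components.Dataframe. Returning the filtered
    # DataFrame directly also updates the output table.
    return DEMO_DF[DEMO_DF["Size"].isin(model_size)].drop(columns="Size")

with gr.Blocks() as demo:
    model_choice = gr.CheckboxGroup(choices=["7B", "13B"], value=["7B", "13B"], label="Model Size")
    main_choice = gr.Radio(choices=["Accuracy"], value="Accuracy", label="Type")
    dataframe_all_per = gr.components.Dataframe(elem_id="leaderboard-table")

    # Re-render the table whenever a filter changes, and once on initial page load.
    model_choice.change(
        get_dataset_classfier_per,
        inputs=[model_choice, main_choice],
        outputs=dataframe_all_per,
    )
    main_choice.change(
        get_dataset_classfier_per,
        inputs=[model_choice, main_choice],
        outputs=dataframe_all_per,
    )
    demo.load(
        fn=get_dataset_classfier_per,
        inputs=[model_choice, main_choice],
        outputs=dataframe_all_per,
    )

demo.launch()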