Clémentine committed on
Commit
ecef2dc
•
1 Parent(s): 613696b

updated design to select columns to display

Browse files
Files changed (2) hide show
  1. app.py +28 -38
  2. src/utils_display.py +1 -1
app.py CHANGED
@@ -259,16 +259,22 @@ def refresh():
259
  )
260
 
261
 
262
- def search_table(df, query):
263
- if AutoEvalColumn.model_type.name in df.columns:
264
  filtered_df = df[
265
  (df[AutoEvalColumn.dummy.name].str.contains(query, case=False))
266
  | (df[AutoEvalColumn.model_type.name].str.contains(query, case=False))
267
  ]
268
  else:
269
  filtered_df = df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]
270
- return filtered_df
 
271
 
 
 
 
 
 
272
 
273
  def change_tab(query_param):
274
  query_param = query_param.replace("'", '"')
@@ -288,44 +294,30 @@ demo = gr.Blocks(css=custom_css)
288
  with demo:
289
  gr.HTML(TITLE)
290
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
291
- with gr.Row():
292
- with gr.Box(elem_id="search-bar-table-box"):
293
- search_bar = gr.Textbox(
294
- placeholder="πŸ” Search your model and press ENTER...",
295
- show_label=False,
296
- elem_id="search-bar",
297
- )
298
 
299
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
300
  with gr.TabItem("πŸ… LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
301
- leaderboard_table_lite = gr.components.Dataframe(
302
- value=leaderboard_df[COLS_LITE],
303
- headers=COLS_LITE,
304
- datatype=TYPES_LITE,
305
- max_rows=None,
306
- elem_id="leaderboard-table-lite",
307
- )
308
- # Dummy leaderboard for handling the case when the user uses backspace key
309
- hidden_leaderboard_table_for_search_lite = gr.components.Dataframe(
310
- value=original_df[COLS_LITE],
311
- headers=COLS_LITE,
312
- datatype=TYPES_LITE,
313
- max_rows=None,
314
- visible=False,
315
- )
316
- search_bar.submit(
317
- search_table,
318
- [hidden_leaderboard_table_for_search_lite, search_bar],
319
- leaderboard_table_lite,
320
- )
321
-
322
- with gr.TabItem("πŸ” Extended model view", elem_id="llm-benchmark-tab-table", id=1):
323
  leaderboard_table = gr.components.Dataframe(
324
- value=leaderboard_df,
325
- headers=COLS,
326
  datatype=TYPES,
327
  max_rows=None,
328
  elem_id="leaderboard-table",
 
 
329
  )
330
 
331
  # Dummy leaderboard for handling the case when the user uses backspace key
@@ -338,9 +330,10 @@ with demo:
338
  )
339
  search_bar.submit(
340
  search_table,
341
- [hidden_leaderboard_table_for_search, search_bar],
342
  leaderboard_table,
343
  )
 
344
  with gr.TabItem("πŸ“ About", elem_id="llm-benchmark-tab-table", id=2):
345
  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
346
 
@@ -392,7 +385,6 @@ with demo:
392
  label="Model type",
393
  multiselect=False,
394
  value="pretrained",
395
- max_choices=1,
396
  interactive=True,
397
  )
398
 
@@ -402,7 +394,6 @@ with demo:
402
  label="Precision",
403
  multiselect=False,
404
  value="float16",
405
- max_choices=1,
406
  interactive=True,
407
  )
408
  weight_type = gr.Dropdown(
@@ -410,7 +401,6 @@ with demo:
410
  label="Weights type",
411
  multiselect=False,
412
  value="Original",
413
- max_choices=1,
414
  interactive=True,
415
  )
416
  base_model_name_textbox = gr.Textbox(
 
259
  )
260
 
261
 
262
+ def search_table(df, leaderboard_table, query):
263
+ if AutoEvalColumn.model_type.name in leaderboard_table.columns:
264
  filtered_df = df[
265
  (df[AutoEvalColumn.dummy.name].str.contains(query, case=False))
266
  | (df[AutoEvalColumn.model_type.name].str.contains(query, case=False))
267
  ]
268
  else:
269
  filtered_df = df[(df[AutoEvalColumn.dummy.name].str.contains(query, case=False))]
270
+ return filtered_df[leaderboard_table.columns]
271
+
272
 
273
+ def select_columns(df, columns):
274
+ always_here_cols = [AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name]
275
+ # We use COLS to maintain sorting
276
+ filtered_df = df[always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name]]
277
+ return filtered_df
278
 
279
  def change_tab(query_param):
280
  query_param = query_param.replace("'", '"')
 
294
  with demo:
295
  gr.HTML(TITLE)
296
  gr.Markdown(INTRODUCTION_TEXT, elem_classes="markdown-text")
 
 
 
 
 
 
 
297
 
298
  with gr.Tabs(elem_classes="tab-buttons") as tabs:
299
  with gr.TabItem("πŸ… LLM Benchmark", elem_id="llm-benchmark-tab-table", id=0):
300
+ with gr.Row():
301
+ shown_columns = gr.CheckboxGroup(
302
+ choices = [c for c in COLS if c not in [AutoEvalColumn.dummy.name, AutoEvalColumn.model.name, AutoEvalColumn.model_type_symbol.name]],
303
+ value = [c for c in COLS_LITE if c not in [AutoEvalColumn.dummy.name, AutoEvalColumn.model.name, AutoEvalColumn.model_type_symbol.name]],
304
+ label="Select columns to show",
305
+ elem_id="column-select",
306
+ interactive=True,
307
+ )
308
+ search_bar = gr.Textbox(
309
+ placeholder="πŸ” Search for your model and press ENTER...",
310
+ show_label=False,
311
+ elem_id="search-bar",
312
+ )
 
 
 
 
 
 
 
 
 
313
  leaderboard_table = gr.components.Dataframe(
314
+ value=leaderboard_df[[AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name] + shown_columns.value+ [AutoEvalColumn.dummy.name]],
315
+ headers=[AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name] + shown_columns.value + [AutoEvalColumn.dummy.name],
316
  datatype=TYPES,
317
  max_rows=None,
318
  elem_id="leaderboard-table",
319
+ interactive=False,
320
+ visible=True,
321
  )
322
 
323
  # Dummy leaderboard for handling the case when the user uses backspace key
 
330
  )
331
  search_bar.submit(
332
  search_table,
333
+ [hidden_leaderboard_table_for_search, leaderboard_table, search_bar],
334
  leaderboard_table,
335
  )
336
+ shown_columns.change(select_columns, [hidden_leaderboard_table_for_search, shown_columns], leaderboard_table)
337
  with gr.TabItem("πŸ“ About", elem_id="llm-benchmark-tab-table", id=2):
338
  gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
339
 
 
385
  label="Model type",
386
  multiselect=False,
387
  value="pretrained",
 
388
  interactive=True,
389
  )
390
 
 
394
  label="Precision",
395
  multiselect=False,
396
  value="float16",
 
397
  interactive=True,
398
  )
399
  weight_type = gr.Dropdown(
 
401
  label="Weights type",
402
  multiselect=False,
403
  value="Original",
 
404
  interactive=True,
405
  )
406
  base_model_name_textbox = gr.Textbox(
src/utils_display.py CHANGED
@@ -20,7 +20,7 @@ class AutoEvalColumn: # Auto evals column
20
  arc = ColumnContent("ARC", "number", True)
21
  hellaswag = ColumnContent("HellaSwag", "number", True)
22
  mmlu = ColumnContent("MMLU", "number", True)
23
- truthfulqa = ColumnContent("TruthfulQA (MC) ⬆️", "number", True)
24
  model_type = ColumnContent("Type", "str", False)
25
  precision = ColumnContent("Precision", "str", False, True)
26
  license = ColumnContent("Hub License", "str", False)
 
20
  arc = ColumnContent("ARC", "number", True)
21
  hellaswag = ColumnContent("HellaSwag", "number", True)
22
  mmlu = ColumnContent("MMLU", "number", True)
23
+ truthfulqa = ColumnContent("TruthfulQA", "number", True)
24
  model_type = ColumnContent("Type", "str", False)
25
  precision = ColumnContent("Precision", "str", False, True)
26
  license = ColumnContent("Hub License", "str", False)