multimodalart (HF staff) committed
Commit f0f600d • 1 Parent(s): ecef2dc

Add filter to model type

Files changed (1)
  1. app.py +25 -6
app.py CHANGED
@@ -27,7 +27,7 @@ RESULTS_REPO = "open-llm-leaderboard/results"
 PRIVATE_QUEUE_REPO = "open-llm-leaderboard/private-requests"
 PRIVATE_RESULTS_REPO = "open-llm-leaderboard/private-results"
 
-IS_PUBLIC = bool(os.environ.get("IS_PUBLIC", True))
+IS_PUBLIC = True
 
 EVAL_REQUESTS_PATH = "eval-queue"
 EVAL_RESULTS_PATH = "eval-results"
@@ -276,6 +276,18 @@ def select_columns(df, columns):
     filtered_df = df[always_here_cols + [c for c in COLS if c in df.columns and c in columns] + [AutoEvalColumn.dummy.name]]
     return filtered_df
 
+#TODO allow this to filter by values of any columns
+def filter_items(df, leaderboard_table, query):
+    if query == "all":
+        return df[leaderboard_table.columns]
+    else:
+        query = query[0] #take only the emoji character
+    if AutoEvalColumn.model_type_symbol.name in leaderboard_table.columns:
+        filtered_df = df[(df[AutoEvalColumn.model_type_symbol.name] == query)]
+    else:
+        return leaderboard_table.columns
+    return filtered_df[leaderboard_table.columns]
+
 def change_tab(query_param):
     query_param = query_param.replace("'", '"')
     query_param = json.loads(query_param)
@@ -305,11 +317,17 @@ with demo:
                 elem_id="column-select",
                 interactive=True,
             )
-            search_bar = gr.Textbox(
-                placeholder="🔍 Search for your model and press ENTER...",
-                show_label=False,
-                elem_id="search-bar",
-            )
+        with gr.Column(min_width=320):
+            search_bar = gr.Textbox(
+                placeholder="🔍 Search for your model and press ENTER...",
+                show_label=False,
+                elem_id="search-bar",
+            )
+            filter_columns = gr.Radio(
+                label="⏚ Filter model types",
+                choices = ["all", "🟢 base", "🔶 instruction-tuned", "🟦 RL-tuned"],
+                elem_id="filter-columns"
+            )
             leaderboard_table = gr.components.Dataframe(
                 value=leaderboard_df[[AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name] + shown_columns.value+ [AutoEvalColumn.dummy.name]],
                 headers=[AutoEvalColumn.model_type_symbol.name, AutoEvalColumn.model.name] + shown_columns.value + [AutoEvalColumn.dummy.name],
@@ -334,6 +352,7 @@
                 leaderboard_table,
             )
             shown_columns.change(select_columns, [hidden_leaderboard_table_for_search, shown_columns], leaderboard_table)
+            filter_columns.change(filter_items, [hidden_leaderboard_table_for_search, leaderboard_table, filter_columns], leaderboard_table)
             with gr.TabItem("📝 About", elem_id="llm-benchmark-tab-table", id=2):
                 gr.Markdown(LLM_BENCHMARKS_TEXT, elem_classes="markdown-text")
 
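The heart of the commit is the new filter_items callback. Below is a minimal, self-contained sketch of its behavior, with a toy pandas DataFrame and an illustrative stand-in for the app's AutoEvalColumn registry (neither is from the repo):

import pandas as pd

# Illustrative stand-in for the app's AutoEvalColumn registry (assumed shape).
class AutoEvalColumn:
    class model_type_symbol:
        name = "T"

def filter_items(df, leaderboard_table, query):
    # "all" restores the unfiltered view; any other radio choice is reduced
    # to its leading emoji (e.g. "🟢 base" -> "🟢") and matched against the
    # model-type-symbol column of the full hidden table.
    if query == "all":
        return df[leaderboard_table.columns]
    else:
        query = query[0]  # take only the emoji character
    if AutoEvalColumn.model_type_symbol.name in leaderboard_table.columns:
        filtered_df = df[df[AutoEvalColumn.model_type_symbol.name] == query]
    else:
        return leaderboard_table.columns
    return filtered_df[leaderboard_table.columns]

# Toy leaderboard: two base models and one instruction-tuned model.
df = pd.DataFrame({"T": ["🟢", "🟢", "🔶"], "model": ["a", "b", "c"]})
print(filter_items(df, df, "🟢 base"))  # keeps rows a and b
print(filter_items(df, df, "all"))      # returns the full table

Note that query[0] works because each model-type emoji is a single Unicode code point, and that the filter always reselects from the hidden full table (df), so successive radio choices do not compound.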
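For context, the event wiring the commit uses can be reproduced in a few lines. A minimal sketch, assuming the Gradio 3.x Blocks API (component and function names here are illustrative):

import gradio as gr
import pandas as pd

full_df = pd.DataFrame({"T": ["🟢", "🔶"], "model": ["model-a", "model-b"]})

def filter_rows(query):
    # Same contract as filter_items: "all" resets the view; otherwise
    # match rows on the leading emoji of the selected radio choice.
    if query == "all":
        return full_df
    return full_df[full_df["T"] == query[0]]

with gr.Blocks() as demo:
    radio = gr.Radio(
        label="Filter model types",
        choices=["all", "🟢 base", "🔶 instruction-tuned"],
        value="all",
    )
    table = gr.Dataframe(value=full_df)
    # .change fires on every new radio selection and streams the returned
    # DataFrame into the table, mirroring filter_columns.change above.
    radio.change(filter_rows, inputs=radio, outputs=table)

demo.launch()

The commit feeds the filter from a second, hidden copy of the table (hidden_leaderboard_table_for_search), so both filtering and column selection start from the complete data rather than from whatever is currently displayed.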