daiqi committed
Commit 90def7d · verified · Parent(s): df28153

Update app.py

Files changed (1): app.py (+23, -23)
app.py CHANGED
@@ -1,12 +1,10 @@
 import gradio as gr
-from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns
+from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns, SearchColumns
 import pandas as pd
 from apscheduler.schedulers.background import BackgroundScheduler
 from huggingface_hub import snapshot_download
 
-import os
-print("@@@@@@@@@@@", os.getcwd())
-os.system("ls ./")
+
 
 from src.about import (
     CITATION_BUTTON_LABEL,
@@ -54,6 +52,9 @@ except Exception:
 
 
 LEADERBOARD_DF = get_leaderboard_df(EVAL_RESULTS_PATH, EVAL_REQUESTS_PATH, COLS, BENCHMARK_COLS)
+_test_data = pd.DataFrame({"Name": ["MageBench", "MageBench", "MageBench"], "BaseModel": ["GPT-4o", "GPT-4o", "GPT-4o"], "Env.": ["Sokoban", "Sokoban", "Football"],
+                           "Target-research": ["Model-Eval-Global", "Model-Eval-Online", "Agent-Eval-Prompt"], "Link": ["xxx", "xxx", "xxx"]})
+
 
 (
     finished_eval_queue_df,
@@ -65,30 +66,29 @@ def init_leaderboard(dataframe):
     if dataframe is None or dataframe.empty:
         raise ValueError("Leaderboard DataFrame is empty or None.")
     return Leaderboard(
-        value=dataframe,
-        datatype=[c.type for c in fields(AutoEvalColumn)],
+        value=_test_data,  # dataframe,
         select_columns=SelectColumns(
-            default_selection=[c.name for c in fields(AutoEvalColumn) if c.displayed_by_default],
-            cant_deselect=[c.name for c in fields(AutoEvalColumn) if c.never_hidden],
+            default_selection=["Name", "BaseModel", "Link", "Env."],
+            cant_deselect=["Name", "BaseModel", "Link", "Env."],
             label="Select Columns to Display:",
         ),
-        search_columns=[AutoEvalColumn.model.name, AutoEvalColumn.license.name],
-        hide_columns=[c.name for c in fields(AutoEvalColumn) if c.hidden],
+        search_columns=SearchColumns(primary_column="Name", secondary_columns="BaseModel",
+                                     placeholder="Search by work name or base model; to search by base model, type 'basemodel:<query>'",
+                                     label="Search"),
         filter_columns=[
-            ColumnFilter(AutoEvalColumn.model_type.name, type="checkboxgroup", label="Model types"),
-            ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
-            ColumnFilter(
-                AutoEvalColumn.params.name,
-                type="slider",
-                min=0.01,
-                max=150,
-                label="Select the number of parameters (B)",
-            ),
-            ColumnFilter(
-                AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
-            ),
+            ColumnFilter("Target-research", type="checkboxgroup", label="Comparison settings for target research"),
+            # ColumnFilter(AutoEvalColumn.precision.name, type="checkboxgroup", label="Precision"),
+            # ColumnFilter(
+            #     AutoEvalColumn.params.name,
+            #     type="slider",
+            #     min=0.01,
+            #     max=150,
+            #     label="Select the number of parameters (B)",
+            # ),
+            # ColumnFilter(
+            #     AutoEvalColumn.still_on_hub.name, type="boolean", label="Deleted/incomplete", default=True
+            # ),
         ],
-        bool_checkboxgroup_label="Hide models",
         interactive=False,
     )
 
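
For context, the sketch below (illustrative only, not part of this commit) shows how the leaderboard configuration introduced above could be mounted in a standalone Gradio Blocks app. It rebuilds the same test DataFrame and reuses only the gradio_leaderboard arguments that appear in the diff; the Blocks wrapper, the page title, and the launch() call are assumptions of mine, standard Gradio usage rather than anything taken from app.py.

# Minimal sketch of the new leaderboard setup, assuming the gradio_leaderboard
# API as used in the diff above. The Blocks wrapper and launch() are my additions.
import gradio as gr
import pandas as pd
from gradio_leaderboard import Leaderboard, ColumnFilter, SelectColumns, SearchColumns

# Same placeholder rows as the commit's _test_data.
_test_data = pd.DataFrame({
    "Name": ["MageBench", "MageBench", "MageBench"],
    "BaseModel": ["GPT-4o", "GPT-4o", "GPT-4o"],
    "Env.": ["Sokoban", "Sokoban", "Football"],
    "Target-research": ["Model-Eval-Global", "Model-Eval-Online", "Agent-Eval-Prompt"],
    "Link": ["xxx", "xxx", "xxx"],
})

with gr.Blocks() as demo:
    gr.Markdown("# MageBench Leaderboard")  # illustrative title
    Leaderboard(
        value=_test_data,
        select_columns=SelectColumns(
            default_selection=["Name", "BaseModel", "Link", "Env."],
            cant_deselect=["Name", "BaseModel", "Link", "Env."],
            label="Select Columns to Display:",
        ),
        search_columns=SearchColumns(
            primary_column="Name",
            secondary_columns="BaseModel",
            placeholder="Search by work name; type 'basemodel:<query>' to search by base model",
            label="Search",
        ),
        filter_columns=[
            ColumnFilter("Target-research", type="checkboxgroup",
                         label="Comparison settings for target research"),
        ],
        interactive=False,
    )

if __name__ == "__main__":
    demo.launch()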