kexinhuang12345 committed
Commit • c5b2acb
1 Parent(s): 99be919
fix column selection bug

app.py CHANGED
@@ -18,7 +18,7 @@ from src.about import (
 from src.display.css_html_js import custom_css
 from src.display.utils import (
     BENCHMARK_COLS,
-    COLS,
+    #COLS,
     COLS_NC,
     COLS_NR,
     COLS_LP,
@@ -70,6 +70,7 @@ def update_table(
 ):
     #filtered_df = filter_models(hidden_df, size_query, show_deleted)
     filtered_df = filter_queries(query, hidden_df)
+    print(columns)
     df = select_columns(filtered_df, columns)
     return df
 
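For context, update_table is the Gradio callback that re-renders the leaderboard table when the search box or the column checkboxes change, and the print(columns) added above is temporary debug output for the selection bug. Below is a minimal, self-contained sketch of that flow; the argument order, the toy data, and the substring-match filter are assumptions for illustration, not the Space's actual implementation:

import pandas as pd

def filter_queries(query: str, df: pd.DataFrame) -> pd.DataFrame:
    # keep rows whose Model name contains the search query (illustrative)
    return df[df["Model"].str.contains(query, case=False)] if query else df

def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
    # always keep Model, plus whichever optional columns are selected
    return df[["Model"] + [c for c in df.columns if c in columns and c != "Model"]]

def update_table(hidden_df: pd.DataFrame, columns: list, query: str) -> pd.DataFrame:
    filtered_df = filter_queries(query, hidden_df)
    print(columns)  # debug print added in this commit
    return select_columns(filtered_df, columns)

df = pd.DataFrame({"Model": ["GCN", "GAT"], "Accuracy": [0.81, 0.84], "F1": [0.78, 0.80]})
print(update_table(df, ["Accuracy"], "ga"))  # -> the GAT row with Model + Accuracy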
@@ -83,9 +84,13 @@ def select_columns(df: pd.DataFrame, columns: list) -> pd.DataFrame:
         "Model"
     ]
     # We use COLS to maintain sorting
+    #print(df)
+    #print(df.columns)
+    #print([c for c in df.columns if c in columns])
     filtered_df = df[
-        always_here_cols + [c for c in COLS if c in columns]
+        always_here_cols + [c for c in df.columns if c in columns]
     ]
+    #print(filtered_df)
     return filtered_df
 
 
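This hunk is the actual fix. The old comprehension iterated the module-level COLS list, which each leaderboard tab rebinds to its own column set (see the global COLS change in the next hunk), so one tab could ask the DataFrame for columns that only exist in another tab's frame. Iterating df.columns instead selects only columns that are really present, in the frame's own order, which also replaces the old "use COLS to maintain sorting" trick. A small reproduction of my reading of the failure mode, with hypothetical column names:

import pandas as pd

# a frame that has an MRR column but no Accuracy column
df = pd.DataFrame({"Model": ["GCN"], "MRR": [0.41]})
COLS = ["Model", "Accuracy", "MRR"]  # global list, rebound per tab
selected = ["Accuracy", "MRR"]       # the user's checkbox selection

# old behavior: iterating the global COLS can reference missing columns
try:
    df[["Model"] + [c for c in COLS if c in selected]]
except KeyError as err:
    print("old:", err)  # 'Accuracy' is not in this frame

# fixed behavior: iterating df.columns keeps only columns that exist
print(df[["Model"] + [c for c in df.columns if c in selected]])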
@@ -135,6 +140,7 @@ with demo:
 
     with gr.Tabs(elem_classes="tab-buttons") as tabs:
         with gr.TabItem("🏅 Entity Classification Leaderboard", elem_id="llm-benchmark-tab-table", id=0):
+            global COLS
             COLS = COLS_NC
             AutoEvalColumn = AutoEvalColumn_NodeClassification
             original_df = get_leaderboard_df(EVAL_REQUESTS_PATH, "Node Classification")
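The new global COLS makes the per-tab rebinding (COLS = COLS_NC here, and the matching assignments in the other tabs) target the module-level name. At true module scope the declaration is redundant but harmless; it becomes necessary the moment this block runs inside a function, where a bare assignment would otherwise create a new local. A minimal illustration:

COLS = []  # module-level default

def build_tab(cols_for_tab):
    global COLS           # without this, the assignment below would bind a local name
    COLS = cols_for_tab   # rebinds the module-level COLS instead

build_tab(["Model", "Accuracy"])
print(COLS)  # ['Model', 'Accuracy']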
@@ -165,8 +171,8 @@ with demo:
                 interactive=True,
             )
 
-            print(leaderboard_df)
-            print(shown_columns.value)
+            #print(leaderboard_df)
+            #print(shown_columns.value)
             leaderboard_table = gr.components.Dataframe(
                 value=leaderboard_df[
                     [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
@@ -239,8 +245,8 @@ with demo:
                 interactive=True,
             )
 
-            print(leaderboard_df)
-            print(shown_columns)
+            #print(leaderboard_df)
+            #print(shown_columns)
             leaderboard_table = gr.components.Dataframe(
                 value=leaderboard_df[
                     [c.name for c in fields(AutoEvalColumn) if c.never_hidden]
@@ -312,8 +318,8 @@ with demo:
                 interactive=True,
             )
 
-            print(leaderboard_df)
-            print(shown_columns)
+            #print(leaderboard_df)
+            #print(shown_columns)
             leaderboard_table = gr.components.Dataframe(
                 value=leaderboard_df[
                     [c.name for c in fields(AutoEvalColumn) if c.never_hidden]