Commit aa0703f committed by kexinhuang12345
1 Parent(s): ba0a549

update

Files changed:
- app.py (+15 -5)
- src/populate.py (+2 -2)
- src/submission/submit.py (+2 -0)
app.py CHANGED
@@ -234,19 +234,28 @@ with demo:
 )
 paper_url_textbox = gr.Textbox(label="Paper URL Link")
 github_url_textbox = gr.Textbox(label="GitHub URL Link")
-
-with gr.Column():
-    test_performance = gr.TextArea(label="Test set performance, use {task: [mean,std]} format e.g. {'rel-amazon/user-churn': [0.352,0.023], 'rel-amazon/user-ltv': [0.304,0.022], ...}")
-    valid_performance = gr.TextArea(label="Validation set performance, use {task: [mean,std]} format e.g. {'rel-amazon/user-churn': [0.352,0.023], 'rel-amazon/user-ltv': [0.304,0.022], ...}")
 parameters_textbox = gr.Textbox(label="Number of parameters")
+task_track = gr.Dropdown(
+    choices=['Node Classification', 'Node Regression', 'Link Prediction'],
+    label="Choose the task track",
+    multiselect=False,
+    value=None,
+    interactive=True,
+)
 honor_code = gr.Dropdown(
     choices=[i.value.name for i in HONOR],
-    label="
+    label="Do you agree to the honor code?",
     multiselect=False,
     value=None,
     interactive=True,
 )
 
+
+with gr.Column():
+    test_performance = gr.Textbox(lines = 17, label="Test set performance, use {task: [mean,std]} format e.g. {'rel-amazon/user-churn': [0.352,0.023], 'rel-amazon/user-ltv': [0.304,0.022], ...}")
+    valid_performance = gr.Textbox(lines = 17, label="Validation set performance, use {task: [mean,std]} format e.g. {'rel-amazon/user-churn': [0.352,0.023], 'rel-amazon/user-ltv': [0.304,0.022], ...}")
+
+
 submit_button = gr.Button("Submit Eval")
 submission_result = gr.Markdown()
 submit_button.click(
@@ -263,6 +272,7 @@ with demo:
     github_url_textbox,
     parameters_textbox,
     honor_code,
+    task_track
 ],
 submission_result,
 )
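Note (not part of the commit): a minimal, self-contained sketch of how the new task_track dropdown is wired into the submission flow. Only the widgets, choices, and argument names visible in the diff are taken from the commit; the trimmed add_new_eval stub, the placeholder HONOR choices, and the overall layout are illustrative assumptions.

import gradio as gr

# Hypothetical, trimmed-down version of the submission form; the real
# add_new_eval takes more arguments than shown here.
def add_new_eval(github_url, parameters, honor_code, task_track):
    # task_track arrives as the last positional argument, matching its
    # position at the end of the inputs list below.
    return f"Queued a {task_track} submission"

with gr.Blocks() as demo:
    github_url_textbox = gr.Textbox(label="GitHub URL Link")
    parameters_textbox = gr.Textbox(label="Number of parameters")
    task_track = gr.Dropdown(
        choices=['Node Classification', 'Node Regression', 'Link Prediction'],
        label="Choose the task track",
        multiselect=False,
        value=None,
        interactive=True,
    )
    honor_code = gr.Dropdown(
        choices=["I agree"],  # placeholder for [i.value.name for i in HONOR]
        label="Do you agree to the honor code?",
        multiselect=False,
        value=None,
        interactive=True,
    )
    submit_button = gr.Button("Submit Eval")
    submission_result = gr.Markdown()
    # Gradio passes inputs to the callback in list order, so task_track
    # must also be appended to add_new_eval's signature (see submit.py below).
    submit_button.click(
        add_new_eval,
        [github_url_textbox, parameters_textbox, honor_code, task_track],
        submission_result,
    )

demo.launch()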
src/populate.py CHANGED
@@ -61,10 +61,10 @@ def get_leaderboard_df(EVAL_REQUESTS_PATH, tasks) -> pd.DataFrame:
 
     #df_res = pd.DataFrame([{col: model[col] for col in columns_to_show} for model in model_res])
     print(df_res)
-    ranks = df_res[list(name2short_name.values())].rank()
+    ranks = df_res[list(name2short_name.values())].rank(ascending = False)
     df_res.rename(columns={'model': 'Model', 'author': 'Author', 'email': 'Email', 'paper_url': 'Paper URL', 'github_url': 'Github URL', 'submitted_time': 'Time', 'params': '# of Params'}, inplace=True)
     df_res['Average Rank⬆️'] = ranks.mean(axis=1)
-    df_res.sort_values(by='Average Rank⬆️', ascending=
+    df_res.sort_values(by='Average Rank⬆️', ascending=True, inplace=True)
     return df_res
 
 def get_evaluation_queue_df(save_path: str, cols: list) -> list[pd.DataFrame]:
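Note (not from the repository): a toy pandas example of what the ranking change does. With ascending=False the best score on each task gets rank 1, so a lower Average Rank⬆️ means a better model, and the final sort_values(ascending=True) puts the strongest models at the top of the leaderboard. The column names below are illustrative stand-ins for name2short_name.values().

import pandas as pd

# Toy leaderboard frame; higher metric values are assumed to be better.
df_res = pd.DataFrame({
    "Model": ["A", "B", "C"],
    "user-churn": [0.70, 0.65, 0.72],
    "user-ltv":   [0.30, 0.41, 0.35],
})

task_cols = ["user-churn", "user-ltv"]
ranks = df_res[task_cols].rank(ascending=False)   # best score per task -> rank 1.0
df_res["Average Rank⬆️"] = ranks.mean(axis=1)     # lower average rank = better overall
df_res.sort_values(by="Average Rank⬆️", ascending=True, inplace=True)
print(df_res)  # prints C, B, A from best to worst average rank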
src/submission/submit.py CHANGED
@@ -37,6 +37,7 @@ def add_new_eval(
     github_url,
     parameters,
     honor_code,
+    task_track
 ):
     global REQUESTED_MODELS
     global USERS_TO_SUBMISSION_DATES
@@ -67,6 +68,7 @@ def add_new_eval(
         "status": "PENDING",
         "submitted_time": current_time,
         "params": model_size,
+        "task": task_track,
         "private": False,
     }
 
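Note: a minimal sketch (not the repository's actual code) of the request record that add_new_eval now assembles. Only the keys visible in the diff come from the commit; the helper name, the timestamp format, and the example values are illustrative assumptions.

from datetime import datetime, timezone

# Hypothetical helper mirroring the dict built inside add_new_eval.
def build_eval_entry(model_size, task_track):
    current_time = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    return {
        "status": "PENDING",
        "submitted_time": current_time,
        "params": model_size,
        "task": task_track,   # new field introduced by this commit
        "private": False,
    }

print(build_eval_entry("125M", "Link Prediction"))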