Spaces: kexinhuang12345 committed · commit 98a0620 (1 parent: 215c2d8) · "add"

Files changed:
- src/about.py  +0 -2
- src/populate.py  +3 -3
src/about.py  CHANGED

@@ -60,7 +60,6 @@ TITLE = """<p align="center"><img src="https://relbench.stanford.edu/img/logo.pn
 
 # What does your leaderboard evaluate?
 INTRODUCTION_TEXT = """
-Relational Deep Learning is a new approach for end-to-end representation learning on data spread across multiple tables, such as in a relational database (see our vision paper). RelBench is the accompanying benchmark which seeks to facilitate efficient, robust and reproducible research in this direction. It comprises of a collection of realistic, large-scale, and diverse datasets structured as relational tables, along with machine learning tasks defined on them. It provides full support for data downloading, task specification and standardized evaluation in an ML-framework-agnostic manner. Additionally, there is seamless integration with PyTorch Geometric to load the data as a graph and train GNN models, and with PyTorch Frame to encode the various types of table columns. Finally, there is a leaderboard for tracking progress.
 """
 
 # Which evaluations are you running? how can people reproduce what you have?
@@ -83,7 +82,6 @@ Once you have developed your model and got results, you can submit your test res
 - **Validation performance**: Validation performance of the model that is used to report the test performance above.
 - **Paper URL Link**: The original paper describing the method (arXiv link is recommended. paper needs not be peer-reviewed). If your method has any original component (e.g., even just combining existing methods XXX and YYY), you have to write a technical report describing it (e.g., how you exactly combined XXX and YYY).
 - **GitHub URL Link**: The Github repository or directory containining all code to reproduce the result. A placeholder repository is not allowed.
-- **Number of Parameters**: The number of parameters of your model, which can be calculated by sum(p.numel() for p in model.parameters()). If you use multi-stage training (e.g., apply node2vec and then MLP), please sum up all the parameters (both node2vec and MLP parameters).
 - **Honor code**: Please acknowledge that your submission adheres to all the ethical policies and your result is reproducible.
 """
 
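For context: the INTRODUCTION_TEXT paragraph removed in the first hunk describes RelBench's data and task loading and its PyTorch Geometric / PyTorch Frame integrations. A minimal, hypothetical sketch of that workflow is below; the `get_dataset`/`get_task` entry points, the `rel-f1` dataset name, and the `driver-position` task name are assumptions based on the public RelBench documentation and may differ between package versions.

```python
# Hypothetical usage sketch -- function names, signatures, and dataset/task names
# are assumptions; consult the relbench documentation for the exact API.
from relbench.datasets import get_dataset
from relbench.tasks import get_task

dataset = get_dataset("rel-f1", download=True)                # a relational database of linked tables
task = get_task("rel-f1", "driver-position", download=True)   # a predictive task defined on it

train_table = task.get_table("train")  # temporal training split
val_table = task.get_table("val")      # validation split (source of the "Validation performance" field)
test_table = task.get_table("test")    # test split; ground-truth labels are withheld for the leaderboard
```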
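The "Number of Parameters" bullet removed in the second hunk prescribes counting parameters with sum(p.numel() for p in model.parameters()) and summing over all stages of multi-stage training. A minimal sketch of that rule, using two placeholder modules with made-up sizes in place of a real node2vec + MLP pipeline:

```python
import torch.nn as nn

# Placeholder stand-ins for a two-stage pipeline (e.g., node2vec embeddings + MLP head);
# module sizes are arbitrary illustration values.
embedding_stage = nn.Embedding(num_embeddings=1000, embedding_dim=128)
mlp_head = nn.Sequential(nn.Linear(128, 64), nn.ReLU(), nn.Linear(64, 1))

def count_params(module: nn.Module) -> int:
    # The counting rule quoted in the submission instructions.
    return sum(p.numel() for p in module.parameters())

# For multi-stage training, sum the parameters of every stage.
total_params = count_params(embedding_stage) + count_params(mlp_head)
print(total_params)  # 1000*128 + (128*64 + 64) + (64*1 + 1) = 136,321
```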
src/populate.py  CHANGED

@@ -56,7 +56,7 @@ def get_leaderboard_df(EVAL_REQUESTS_PATH, task_type) -> pd.DataFrame:
     for model in model_res:
         model["test"] = literal_eval(model["test"])
         model["valid"] = literal_eval(model["valid"])
-        model["params"] = int(model["params"])
+        #model["params"] = int(model["params"])
         model['submitted_time'] = model['submitted_time'].split('T')[0]
         #model['paper_url'] = '[Link](' + model['paper_url'] + ')'
         #model['github_url'] = '[Link](' + model['github_url'] + ')'
@@ -65,7 +65,7 @@ def get_leaderboard_df(EVAL_REQUESTS_PATH, task_type) -> pd.DataFrame:
     for model in model_res:
         model.update({name2short_name[i]: str(model['test'][i][0])[:4] + '±' + str(model['test'][i][1])[:4] if i in model['test'] else '-' for i in name2short_name})
 
-    columns_to_show = ['model', 'author', 'email', 'paper_url', 'github_url', 'submitted_time'
+    columns_to_show = ['model', 'author', 'email', 'paper_url', 'github_url', 'submitted_time'] + list(name2short_name.values())
 
     # Check if model_res is empty
     if len(model_res) > 0:
@@ -77,7 +77,7 @@ def get_leaderboard_df(EVAL_REQUESTS_PATH, task_type) -> pd.DataFrame:
     #df_res = pd.DataFrame([{col: model[col] for col in columns_to_show} for model in model_res])
     print(df_res)
     ranks = df_res[list(name2short_name.values())].rank(ascending = ascending)
-    df_res.rename(columns={'model': 'Model', 'author': 'Author', 'email': 'Email', 'paper_url': 'Paper URL', 'github_url': 'Github URL', 'submitted_time': 'Time'
+    df_res.rename(columns={'model': 'Model', 'author': 'Author', 'email': 'Email', 'paper_url': 'Paper URL', 'github_url': 'Github URL', 'submitted_time': 'Time'}, inplace=True)
     df_res['Average Rank⬆️'] = ranks.mean(axis=1)
     df_res.sort_values(by='Average Rank⬆️', ascending=True, inplace=True)
     return df_res
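To make the touched logic in get_leaderboard_df concrete, here is a self-contained sketch of the metric formatting and average-rank computation with invented model names, task names, and scores (the real function additionally reads submissions from EVAL_REQUESTS_PATH and parses the stored result strings with literal_eval):

```python
import pandas as pd

# Invented stand-ins for the leaderboard inputs.
name2short_name = {"rel-f1-driver-position": "f1-pos", "rel-amazon-user-churn": "amz-churn"}
ascending = False  # higher scores are better for these toy metrics

model_res = [
    {"model": "GNN-A", "test": {"rel-f1-driver-position": (0.8312, 0.0041),
                                "rel-amazon-user-churn": (0.7421, 0.0103)}},
    {"model": "GNN-B", "test": {"rel-f1-driver-position": (0.8105, 0.0052)}},
]

# Same formatting rule as in populate.py: "mean±std" truncated to 4 characters each,
# and '-' when a model has no result for a task.
for model in model_res:
    model.update({name2short_name[i]: str(model["test"][i][0])[:4] + "±" + str(model["test"][i][1])[:4]
                  if i in model["test"] else "-"
                  for i in name2short_name})

df_res = pd.DataFrame([{k: v for k, v in m.items() if k != "test"} for m in model_res])

# Rank each task column, average the ranks, and sort -- mirroring the lines changed above.
ranks = df_res[list(name2short_name.values())].rank(ascending=ascending)
df_res["Average Rank⬆️"] = ranks.mean(axis=1)
df_res.sort_values(by="Average Rank⬆️", ascending=True, inplace=True)
print(df_res)
```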