Spaces:
Running
on
CPU Upgrade
Running
on
CPU Upgrade
Update 18449
#2
by
gsarti
- opened
README.md
CHANGED
@@ -4,9 +4,9 @@ emoji: π
|
|
4 |
colorFrom: green
|
5 |
colorTo: purple
|
6 |
sdk: gradio
|
7 |
-
sdk_version: 4.
|
8 |
app_file: app.py
|
9 |
-
pinned:
|
10 |
header: mini
|
11 |
datasets:
|
12 |
- ICLR2024/ICLR2024-papers
|
|
|
4 |
colorFrom: green
|
5 |
colorTo: purple
|
6 |
sdk: gradio
|
7 |
+
sdk_version: 4.27.0
|
8 |
app_file: app.py
|
9 |
+
pinned: false
|
10 |
header: mini
|
11 |
datasets:
|
12 |
- ICLR2024/ICLR2024-papers
|
app.py
CHANGED
@@ -8,6 +8,10 @@ from papers import PaperList
|
|
8 |
DESCRIPTION = "# ICLR 2024 Papers"
|
9 |
|
10 |
TUTORIAL = """\
|
|
|
|
|
|
|
|
|
11 |
#### Tutorial for claiming the ICLR 2024 papers
|
12 |
|
13 |
1. Find your paper in the table below.
|
@@ -22,10 +26,6 @@ The admin team will validate your request soon. Once confirmed, the Paper page w
|
|
22 |
If you need further assistance, see the guide [here](https://huggingface.co/docs/hub/paper-pages#claiming-authorship-to-a-paper).
|
23 |
|
24 |
If your paper is not yet indexed on Hugging Face, you can index it by following this [guide](https://huggingface.co/docs/hub/paper-pages#can-i-have-a-paper-page-even-if-i-have-no-modeldatasetspace) and open a PR with [this Space](https://huggingface.co/spaces/ICLR2024/update-ICLR2024-papers) to add your Paper page to this Space.
|
25 |
-
|
26 |
-
#### Tutorial for creating a PR
|
27 |
-
|
28 |
-
To add data to the table below, please use [this Space](https://huggingface.co/spaces/ICLR2024/update-ICLR2024-papers) to create a PR.
|
29 |
"""
|
30 |
|
31 |
|
@@ -35,10 +35,7 @@ DEFAULT_COLUMNS = [
|
|
35 |
"Title",
|
36 |
"Type",
|
37 |
"Paper page",
|
38 |
-
"👍",
|
39 |
-
"💬",
|
40 |
"OpenReview",
|
41 |
-
"Project page",
|
42 |
"GitHub",
|
43 |
"Spaces",
|
44 |
"Models",
|
|
|
8 |
DESCRIPTION = "# ICLR 2024 Papers"
|
9 |
|
10 |
TUTORIAL = """\
|
11 |
+
#### Tutorial for creating a PR
|
12 |
+
|
13 |
+
To add data to the table below, please use [this Space](https://huggingface.co/spaces/ICLR2024/update-ICLR2024-papers) to create a PR.
|
14 |
+
|
15 |
#### Tutorial for claiming the ICLR 2024 papers
|
16 |
|
17 |
1. Find your paper in the table below.
|
|
|
26 |
If you need further assistance, see the guide [here](https://huggingface.co/docs/hub/paper-pages#claiming-authorship-to-a-paper).
|
27 |
|
28 |
If your paper is not yet indexed on Hugging Face, you can index it by following this [guide](https://huggingface.co/docs/hub/paper-pages#can-i-have-a-paper-page-even-if-i-have-no-modeldatasetspace) and open a PR with [this Space](https://huggingface.co/spaces/ICLR2024/update-ICLR2024-papers) to add your Paper page to this Space.
|
|
|
|
|
|
|
|
|
29 |
"""
|
30 |
|
31 |
|
|
|
35 |
"Title",
|
36 |
"Type",
|
37 |
"Paper page",
|
|
|
|
|
38 |
"OpenReview",
|
|
|
39 |
"GitHub",
|
40 |
"Spaces",
|
41 |
"Models",
|
papers.py
CHANGED
@@ -24,10 +24,7 @@ class PaperList:
|
|
24 |
["Authors", "str"],
|
25 |
["Type", "str"],
|
26 |
["Paper page", "markdown"],
|
27 |
-
["👍", "number"],
|
28 |
-
["💬", "number"],
|
29 |
["OpenReview", "markdown"],
|
30 |
-
["Project page", "markdown"],
|
31 |
["GitHub", "markdown"],
|
32 |
["Spaces", "markdown"],
|
33 |
["Models", "markdown"],
|
@@ -43,12 +40,11 @@ class PaperList:
|
|
43 |
def get_df() -> pd.DataFrame:
|
44 |
df = pd.merge(
|
45 |
left=datasets.load_dataset("ICLR2024/ICLR2024-papers", split="train").to_pandas(),
|
46 |
-
right=datasets.load_dataset("ICLR2024/ICLR2024-
|
47 |
on="id",
|
48 |
how="left",
|
49 |
)
|
50 |
-
|
51 |
-
df[keys] = df[keys].fillna(-1).astype(int)
|
52 |
df["paper_page"] = df["arxiv_id"].apply(
|
53 |
lambda arxiv_id: f"https://huggingface.co/papers/{arxiv_id}" if arxiv_id else ""
|
54 |
)
|
@@ -66,19 +62,12 @@ class PaperList:
|
|
66 |
n_linked_authors = "" if row.n_linked_authors == -1 else row.n_linked_authors
|
67 |
n_authors = "" if row.n_authors == -1 else row.n_authors
|
68 |
claimed_paper = "" if n_linked_authors == "" else f"{n_linked_authors}/{n_authors} {author_linked}"
|
69 |
-
upvotes = "" if row.upvotes == -1 else row.upvotes
|
70 |
-
num_comments = "" if row.num_comments == -1 else row.num_comments
|
71 |
|
72 |
new_row = {
|
73 |
"Title": row["title"],
|
74 |
"Authors": ", ".join(row["authors"]),
|
75 |
"Type": row["type"],
|
76 |
"Paper page": PaperList.create_link(row["arxiv_id"], row["paper_page"]),
|
77 |
-
"Project page": (
|
78 |
-
PaperList.create_link("Project page", row["project_page"]) if row["project_page"] else ""
|
79 |
-
),
|
80 |
-
"👍": upvotes,
|
81 |
-
"💬": num_comments,
|
82 |
"OpenReview": PaperList.create_link("OpenReview", row["OpenReview"]),
|
83 |
"GitHub": "\n".join([PaperList.create_link("GitHub", url) for url in row["GitHub"]]),
|
84 |
"Spaces": "\n".join(
|
@@ -130,7 +119,7 @@ class PaperList:
|
|
130 |
df = df[df["type"] == presentation_type]
|
131 |
|
132 |
if "Paper page" in filter_names:
|
133 |
-
df = df[df["paper_page"]
|
134 |
if "GitHub" in filter_names:
|
135 |
df = df[df["GitHub"].apply(len) > 0]
|
136 |
if "Space" in filter_names:
|
|
|
24 |
["Authors", "str"],
|
25 |
["Type", "str"],
|
26 |
["Paper page", "markdown"],
|
|
|
|
|
27 |
["OpenReview", "markdown"],
|
|
|
28 |
["GitHub", "markdown"],
|
29 |
["Spaces", "markdown"],
|
30 |
["Models", "markdown"],
|
|
|
40 |
def get_df() -> pd.DataFrame:
|
41 |
df = pd.merge(
|
42 |
left=datasets.load_dataset("ICLR2024/ICLR2024-papers", split="train").to_pandas(),
|
43 |
+
right=datasets.load_dataset("ICLR2024/ICLR2024-num-claimed-papers", split="train").to_pandas(),
|
44 |
on="id",
|
45 |
how="left",
|
46 |
)
|
47 |
+
df[["n_authors", "n_linked_authors"]] = df[["n_authors", "n_linked_authors"]].fillna(-1).astype(int)
|
|
|
48 |
df["paper_page"] = df["arxiv_id"].apply(
|
49 |
lambda arxiv_id: f"https://huggingface.co/papers/{arxiv_id}" if arxiv_id else ""
|
50 |
)
|
|
|
62 |
n_linked_authors = "" if row.n_linked_authors == -1 else row.n_linked_authors
|
63 |
n_authors = "" if row.n_authors == -1 else row.n_authors
|
64 |
claimed_paper = "" if n_linked_authors == "" else f"{n_linked_authors}/{n_authors} {author_linked}"
|
|
|
|
|
65 |
|
66 |
new_row = {
|
67 |
"Title": row["title"],
|
68 |
"Authors": ", ".join(row["authors"]),
|
69 |
"Type": row["type"],
|
70 |
"Paper page": PaperList.create_link(row["arxiv_id"], row["paper_page"]),
|
|
|
|
|
|
|
|
|
|
|
71 |
"OpenReview": PaperList.create_link("OpenReview", row["OpenReview"]),
|
72 |
"GitHub": "\n".join([PaperList.create_link("GitHub", url) for url in row["GitHub"]]),
|
73 |
"Spaces": "\n".join(
|
|
|
119 |
df = df[df["type"] == presentation_type]
|
120 |
|
121 |
if "Paper page" in filter_names:
|
122 |
+
df = df[df["paper_page"].notnull()]
|
123 |
if "GitHub" in filter_names:
|
124 |
df = df[df["GitHub"].apply(len) > 0]
|
125 |
if "Space" in filter_names:
|
requirements.txt
CHANGED
@@ -1,6 +1,6 @@
|
|
1 |
-
datasets==2.
|
2 |
-
#gradio==4.
|
3 |
-
huggingface_hub==0.
|
4 |
-
pandas==2.2.
|
5 |
ragatouille==0.0.8.post2
|
6 |
-
tqdm==4.66.
|
|
|
1 |
+
datasets==2.18.0
|
2 |
+
#gradio==4.27.0
|
3 |
+
huggingface_hub==0.22.2
|
4 |
+
pandas==2.2.1
|
5 |
ragatouille==0.0.8.post2
|
6 |
+
tqdm==4.66.2
|