jpwahle commited on
Commit
0fba077
β€’
1 Parent(s): 5142c8f

Add remaining app files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ data/nlp_papers_field_diversity.csv filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,10 +1,12 @@
1
  ---
2
- title: Field Time Diversity Docker
3
- emoji: 🐨
4
- colorFrom: gray
5
- colorTo: blue
6
- sdk: docker
7
- pinned: false
 
 
8
  ---
9
 
10
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
  ---
2
+ title: Field Diversity
3
+ emoji: 🌈
4
+ colorFrom: pink
5
+ colorTo: indigo
6
+ sdk: gradio
7
+ sdk_version: 3.29.0
8
+ app_file: app.py
9
+ pinned: true
10
  ---
11
 
12
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
aclanthology.py ADDED
@@ -0,0 +1,203 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/
2
+ # All rights reserved.
3
+
4
+ import asyncio
5
+ import json
6
+
7
+ import aiohttp
8
+ import requests
9
+ from bs4 import BeautifulSoup
10
+
11
+
12
async def fetch(session, url):
    """Fetch *url* with the given aiohttp session and return the response body as text."""
    async with session.get(url) as resp:
        body = await resp.text()
    return body
16
+
17
+
18
async def async_match_acl_id_to_s2_paper(acl_id):
    """Look up a paper on the Semantic Scholar Graph API by its ACL Anthology ID.

    Args:
        acl_id (str): The ACL Anthology ID of the paper (e.g. "2023.acl-long.1").

    Returns:
        dict: The parsed JSON payload returned by the Semantic Scholar API.
    """
    request_url = f"https://api.semanticscholar.org/graph/v1/paper/ACL:{acl_id}"
    async with aiohttp.ClientSession() as session:
        raw = await fetch(session, request_url)
    return json.loads(raw)
32
+
33
+
34
def extract_paper_info(paper_url):
    """Scrape title, authors, and ACL ID from an ACL Anthology paper page.

    Args:
        paper_url (str): URL of the paper on the ACL Anthology website.

    Returns:
        dict: {"title": str, "authors": list[str], "acl_id": str}.
    """
    response = requests.get(paper_url, timeout=10)
    soup = BeautifulSoup(response.text, "html.parser")

    title = soup.find("h2", id="title").text.strip()
    # Author links live in the <p class="lead"> element right under the title.
    authors = [
        anchor.text
        for anchor in soup.find_all("a")
        if anchor.parent.name == "p" and anchor.parent["class"] == ["lead"]
    ]
    # The ACL ID is the second-to-last path segment of the canonical URL.
    acl_id = paper_url.split("/")[-2]

    return {"title": title, "authors": authors, "acl_id": acl_id}
56
+
57
+
58
def extract_author_info(author_url):
    """Scrape an ACL Anthology author page.

    Args:
        author_url (str): URL of the author's page on ACL Anthology.

    Returns:
        dict: {"author": <author name>,
               "papers": [{"title": ..., "url": ...}, ...]}.
    """
    page = requests.get(author_url, timeout=10).text
    soup = BeautifulSoup(page, "html.parser")

    author_name = soup.find("h2", id="title").text.strip()

    papers = []
    for paragraph in soup.find_all("p"):
        # Drop the "pdf"/"bib"/"abs" utility links; the first remaining
        # anchor is the paper-title link.
        candidates = [
            anchor
            for anchor in paragraph.find_all("a")
            if anchor.text.strip() not in ["pdf", "bib", "abs"]
        ]
        if not candidates:
            continue
        papers.append(
            {
                "title": candidates[0].text.strip(),
                "url": "https://aclanthology.org" + candidates[0]["href"],
            }
        )

    return {"author": author_name, "papers": papers}
88
+
89
+
90
def extract_venue_info(venue_url):
    """Scrape an ACL Anthology venue/proceedings page.

    Args:
        venue_url (str): URL of the venue page on ACL Anthology.

    Returns:
        dict: {"venue": <venue name>,
               "papers": [{"title": ..., "url": ...}, ...]}.
    """
    page = requests.get(venue_url, timeout=10).text
    soup = BeautifulSoup(page, "html.parser")

    venue_name = soup.find("h2", id="title").text.strip()

    papers = []
    for paragraph in soup.find_all("p"):
        # Drop the "pdf"/"bib"/"abs" utility links; the first remaining
        # anchor is the paper-title link.
        candidates = [
            anchor
            for anchor in paragraph.find_all("a")
            if anchor.text.strip() not in ["pdf", "bib", "abs"]
        ]
        if not candidates:
            continue
        papers.append(
            {
                "title": candidates[0].text.strip(),
                "url": "https://aclanthology.org" + candidates[0]["href"],
            }
        )

    return {"venue": venue_name, "papers": papers}
119
+
120
+
121
def determine_page_type(url):
    """
    Determine the type of ACL Anthology page given its URL.

    Args:
        url (str): The URL to be checked.

    Returns:
        str: "paper", "author", or "venue". Returns None if the type can't
        be determined.
    """
    # Extract non-empty path segments from the URL.
    segments = [segment for segment in url.split("/") if segment]

    # Event and volume listings are venue pages.
    if "events" in url or "volumes" in url:
        return "venue"

    # BUG FIX: canonical paper URLs such as
    # "https://aclanthology.org/2023.acl-long.1/" never matched the old
    # heuristic (segments[-2] is the hostname, not a number), which forced an
    # unnecessary network fetch. Recognize a modern Anthology ID directly:
    # "<year>.<venue>.<number>" -- at least three dot-separated parts with a
    # numeric leading part.
    if segments:
        id_parts = segments[-1].split(".")
        if len(id_parts) >= 3 and id_parts[0].isnumeric():
            return "paper"

    # Original heuristic kept for URLs shaped like ".../<year>/<anthology-id>".
    if len(segments) > 1 and segments[-2].isnumeric() and "." in segments[-1]:
        return "paper"

    if "people" in url:
        return "author"

    # If none of the above rules apply, fetch the page and check its content.
    try:
        html_doc = requests.get(url, timeout=10).text
        soup = BeautifulSoup(html_doc, "html.parser")

        # Check for unique elements specific to each page type: author pages
        # carry a "Google Scholar" link, paper pages do not.
        if soup.find("h2", id="title"):
            return (
                "author"
                if soup.find("a", href=True, text="Google Scholar")
                else "paper"
            )
        elif soup.find("h1", text="Anthology Volume"):
            return "venue"
    except Exception as e:
        # Best effort: network/parse failures fall through to None.
        print(f"Error determining page type: {e}")

    return None
163
+
164
+
165
if __name__ == "__main__":
    loop = asyncio.get_event_loop()

    urls = [
        "https://aclanthology.org/2023.acl-long.1/",
        "https://aclanthology.org/people/a/anna-rogers/",
        "https://aclanthology.org/events/acl-2022/",
    ]

    for url in urls:
        # Classify once per URL instead of up to three times.
        page_type = determine_page_type(url)

        if page_type == "paper":
            print(f"Paper: {url}")
            res = extract_paper_info(url)
            paper = loop.run_until_complete(
                async_match_acl_id_to_s2_paper(res["acl_id"])
            )
            print(paper)

        elif page_type in ("author", "venue"):
            print(f"{page_type.capitalize()}: {url}")
            res = (
                extract_author_info(url)
                if page_type == "author"
                else extract_venue_info(url)
            )
            tasks = [
                async_match_acl_id_to_s2_paper(paper["url"].split("/")[-2])
                for paper in res["papers"]
            ]
            s2_papers = loop.run_until_complete(asyncio.gather(*tasks))
            for s2_paper in s2_papers:
                # BUG FIX: the scraped paper dicts only have "title"/"url"
                # keys; the "paperId" lives in the Semantic Scholar response.
                print(s2_paper.get("paperId"))
data/nlp_papers_citation_age.txt ADDED
The diff for this file is too large to render. See raw diff
 
data/nlp_papers_field_diversity.csv ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1d9eae4b2ad51e66fa4601af3b05ce130f42b3febe8982d1e3dcb0eef53afa15
3
+ size 20053659
main.py ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/
2
+ # All rights reserved.
3
+ # Thanks to Mukund Rungta for inspiration on early versions of this demo https://huggingface.co/spaces/mrungta8/CitationalAmnesia
4
+
5
+
6
+ import asyncio
7
+
8
+ import gradio as gr
9
+
10
+ from aclanthology import determine_page_type
11
+ from plots import generate_cfdi_plot, generate_maoc_plot
12
+ from s2 import (
13
+ check_s2_id_type,
14
+ compute_stats_for_acl_author,
15
+ compute_stats_for_acl_paper,
16
+ compute_stats_for_acl_venue,
17
+ compute_stats_for_s2_author,
18
+ compute_stats_for_s2_paper,
19
+ )
20
+
21
+
22
def return_clear():
    """Reset every demo component.

    Returns:
        tuple: eleven ``None`` values, one per component wired to the
        Clear button (eight outputs plus the three input widgets).
    """
    return (None,) * 11
29
+
30
+
31
def create_compute_stats(submit_type=None):
    """Build a Gradio callback bound to one input modality.

    Args:
        submit_type (str | None): which input the callback should honor --
            "s2_id", "acl_link", or "pdf_file".

    Returns:
        function: a callback taking ``(s2_id, pdf_file, acl_link)`` and
        returning the eight UI output values (all ``None`` when nothing
        could be computed).
    """
    empty_outputs = (None,) * 8

    def compute_stats(s2_id=None, pdf_file=None, acl_link=None):
        if submit_type == "s2_id" and s2_id:
            # An S2 ID may identify either a paper or an author.
            id_type, author_name = check_s2_id_type(s2_id)
            if id_type == "paper":
                return plot_and_return_stats(*compute_stats_for_s2_paper(s2_id))
            if id_type == "author":
                return plot_and_return_stats(
                    *compute_stats_for_s2_author(s2_id, author_name)
                )
        elif submit_type == "acl_link" and acl_link:
            # Dispatch on the kind of Anthology page the link points at.
            handlers = {
                "paper": compute_stats_for_acl_paper,
                "author": compute_stats_for_acl_author,
                "venue": compute_stats_for_acl_venue,
            }
            handler = handlers.get(determine_page_type(acl_link))
            if handler is not None:
                return plot_and_return_stats(*handler(acl_link))
        # PDF upload is not implemented yet; fall through to empty outputs.
        return empty_outputs

    return compute_stats
60
+
61
+
62
def plot_and_return_stats(
    title_authors,
    num_references,
    field_counts,
    year_title_dict,
    cfdi,
    cadi,
    maoc,
):
    """Render the diversity plots and assemble the eight UI output values.

    Args:
        title_authors (str): Paper title plus author names (or author/venue name).
        num_references (int): Number of references considered.
        field_counts (dict): Field name -> citation count.
        year_title_dict (dict): Publication year -> one cited-paper title.
        cfdi (float): Citation Field Diversity Index.
        cadi (float): Citation Age Diversity Index.
        maoc (float): Mean age of citation, plotted against the corpus.

    Returns:
        tuple: (title_authors, num_references, top-3-fields text,
        top-3-oldest-papers text, cfdi, cadi, CFDI figure, mAoC figure).
    """
    cfdi_figure = generate_cfdi_plot(cfdi)
    maoc_figure = generate_maoc_plot(maoc)

    # Three most frequently cited fields, highest count first.
    ranked_fields = sorted(
        field_counts.items(), key=lambda item: item[1], reverse=True
    )
    top_fields_text = "\n".join(
        f"{field}: {count}" for field, count in ranked_fields[:3]
    )

    # Three oldest cited papers (smallest years first).
    oldest_paper_text = "".join(
        f"[{str(year)}] {title}\n"
        for year, title in sorted(year_title_dict.items())[:3]
    )

    return (
        title_authors,
        num_references,
        top_fields_text,
        oldest_paper_text,
        cfdi,
        cadi,
        cfdi_figure,
        maoc_figure,
    )
119
+
120
+
121
# ---------------------------------------------------------------------------
# Gradio UI: wires the three input modalities (S2 ID, ACL Anthology link,
# PDF upload) to the shared stats computation and renders the results.
# ---------------------------------------------------------------------------
with gr.Blocks(
    theme=gr.themes.Soft(), css=".gradio-container {background-color: white}"
) as demo:
    with gr.Row():
        gr.Markdown(
            """
            # Citation Age and Field Diversity Calculator

            <div align="center">
            <img src="https://onedrive.live.com/embed?resid=684CB5200DB6B388%21673358&authkey=%21AOrhvc6YI9lKFSc&width=1582&height=385" />
            </div>

            Welcome to this interactive demo to analyze various aspects of your citational diversity. This tool will enable you to reflect on two critical aspects:

            - By whom am I influenced? Which fields heavily inform and shape the research trajectory of my works?
            - How far back in time do I cite? What are critical works (present and past) that shape my research?

            In addition, you will be able to analyze how the above compares to the average paper or author. The results you will receive can not be categorized into “good” or “bad”. Instead, they are meant to raise self-awareness about one’s citational diversity and reflect on it. The results might bring you to further questions, such as:

            - Am I reading widely across fields and time?
            - Should I expand my literature search to include works from other fields?
            - Are there ideas rooted in the past that can be used in an innovative way?

            Using citations as a tangible marker of influence, our demo provides empirical insights into the influence of papers across fields and time.

            ## What is Citation Field Diversity?

            Field diversity is a measure of the variety of research Fields that a paper or an author draws upon. A high field diversity indicates that the work draws from various distinct research fields, demonstrating a multidisciplinary influence on that work or author.

            ## What is Citation Age Diversity?

            Citation age is a measure of how far back in time a paper cites other papers. A high citation age shows that the work draws from past works, while a low citation age indicates that mostly recent work has influenced that paper.

            """
        )
        gr.Markdown(
            """
            ## What are the Citation Field Diversity Index (CFDI) and Citation Age Diversity Index (CADI) and how are they calculated?

            The calculation of Field Diversity involves extracting all the references of a paper, categorizing them into distinct study fields, and determining the proportion of each study field over all the references. The Citation Field Diversity Index (CFDI) is then computed by applying the Gini Index on these proportions.
            Calculating CADI is similar to CFDI but instead of determining the proportion of each study field, we determine the proportion of citation ages. If we take a paper from 2020 that cites two papers, one from 2010 and one from 1990, the citation ages are 10 and 30, respectively. The CADI is then computed by applying the Gini Index on these ages.
            For more details, please refer to Eq. 3 in [this paper](https://aclanthology.org/2023.acl-long.341/) and Eq. 4 in [this paper](https://arxiv.org/).

            ## How do I Interpret CFDI and CADI?

            For both indices, higher values indicate a greater diversity of a NLP paper (in terms of how far back it cites and in the fields it cites). On the other hand, lower values signify a lower diversity, indicating that citations are more concentrated in specific fields and time ranges.

            ## How can I use this demo?

            There are three ways to compute the field and age diversity for papers:
            1. **Semantic Scholar ID**: Enter the Semantic Scholar ID of a **paper** or **author** and click the *"Compute"* button.
            2. **ACL Anthology Link**: Paste the ACL Anthology link of a **paper**, **venue**, or **author** and click the *"Compute"* button.
            3. **PDF File**: Upload your **paper** PDF and click the *"Compute"* button.

            To retrieve the **Semantic Scholar ID** for a paper such as "The Elephant in the Room: Analyzing the Presence of Big Tech in Natural Language Processing Research," search the paper on Semantic Scholar [here](https://www.semanticscholar.org/paper/The-Elephant-in-the-Room%3A-Analyzing-the-Presence-of-Abdalla-Wahle/587ffdfd7229e8e0dbc5250b44df5fad6251f6ad) and use the last part of the URL. The Semantic Scholar ID (SSID) for this paper is: **587ffdfd7229e8e0dbc5250b44df5fad6251f6ad**.

            To get an ACL Anthology link, you can go to any ACL Anthology paper, author or proceedings page and just copy and paste the url. For example:
            - https://aclanthology.org/2023.acl-long.1/
            - https://aclanthology.org/people/a/anna-rogers/
            - https://aclanthology.org/events/acl-2002/
            """
        )

    with gr.Row():
        with gr.Tabs():
            with gr.TabItem("Semantic Scholar ID"):
                s2_id = gr.Textbox(
                    label="Semantic Scholar ID",
                    placeholder=(
                        "Enter the Semantic Scholar ID here and press enter..."
                    ),
                )
                with gr.Row():
                    s2_submit_btn = gr.Button("Compute")
            with gr.TabItem("ACL Anthology Link"):
                acl_link = gr.Textbox(
                    label="ACL Anthology Link",
                    placeholder="Paste the ACL Anthology link here...",
                )
                with gr.Row():
                    acl_submit_btn = gr.Button("Compute")
            with gr.TabItem("PDF File"):
                pdf_file = gr.File(
                    file_types=[".pdf"], label="Upload your paper PDF"
                )
                with gr.Row():
                    file_submit_btn = gr.Button("Compute")
    with gr.Row():
        # Holds the paper title, author name, or venue name depending on input.
        title = gr.Textbox(label="Title / Author Name / Venue Name:", lines=2)
    with gr.Row():
        num_ref = gr.Textbox(label="Number of references", lines=3)
        top_field_list = gr.Textbox(label="Top 3 fields cited:", lines=3)
        top_age_list = gr.Textbox(label="Top 3 oldest papers cited:", lines=3)
    with gr.Row():
        cfdi = gr.Textbox(label="CFDI")
        cadi = gr.Textbox(label="CADI")
    with gr.Row():
        cfdi_plot = gr.Plot(label="Citation Field Diversity")
        cadi_plot = gr.Plot(label="Citation Age Diversity")
    with gr.Row():
        clear_btn = gr.Button("Clear")

    # The three submit paths share identical inputs/outputs wiring; only the
    # callback differs (each knows which input field to honor).
    submit_args = dict(
        inputs=[s2_id, pdf_file, acl_link],
        outputs=[
            title,
            num_ref,
            top_field_list,
            top_age_list,
            cfdi,
            cadi,
            cfdi_plot,
            cadi_plot,
        ],
    )

    s2_submit_args = submit_args.copy()
    s2_submit_args["fn"] = create_compute_stats(submit_type="s2_id")

    acl_submit_args = submit_args.copy()
    acl_submit_args["fn"] = create_compute_stats(submit_type="acl_link")

    file_submit_args = submit_args.copy()
    file_submit_args["fn"] = create_compute_stats(submit_type="pdf_file")

    # Pressing enter in either textbox behaves like clicking its button.
    s2_id.submit(**s2_submit_args)
    acl_link.submit(**acl_submit_args)

    acl_submit_btn.click(**acl_submit_args)
    s2_submit_btn.click(**s2_submit_args)
    file_submit_btn.click(**file_submit_args)

    clear_btn.click(
        fn=return_clear,
        inputs=[],
        outputs=[
            title,
            num_ref,
            top_field_list,
            top_age_list,
            cfdi,
            cadi,
            cfdi_plot,
            cadi_plot,
            s2_id,
            acl_link,
            pdf_file,
        ],
    )

demo.queue(concurrency_count=3)
demo.launch(server_port=7860)
metrics.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/
2
+ # All rights reserved.
3
+
4
+ import numpy as np
5
+
6
+
7
def calculate_gini_simpson(dictionary):
    """Return the Gini-Simpson diversity index of a count dictionary.

    Args:
        dictionary (dict): category -> non-negative count.

    Returns:
        float | None: ``1 - sum(p_i ** 2)`` over the category proportions,
        or ``None`` when the counts sum to zero (diversity is undefined),
        mirroring ``calculate_gini``'s empty-input behavior.
    """
    total = sum(dictionary.values())
    # Guard against empty input / all-zero counts, which previously caused a
    # ZeroDivisionError (all-zero) or a meaningless result (empty dict).
    if total == 0:
        return None
    sum_squares = sum((n / total) ** 2 for n in dictionary.values())
    return 1 - sum_squares
14
+
15
+
16
def calculate_gini(frequencies):
    """Return Gini's diversity index for a sequence of frequencies.

    Computes the normalized mean absolute pairwise difference
    (sum over pairs of |x_i - x_j|, divided by n**2 * mean).

    Args:
        frequencies: sequence of non-negative numbers.

    Returns:
        float | None: the Gini index, or ``None`` for an empty sequence or
        a zero mean (index undefined).
    """
    values = np.array(frequencies)
    if len(values) == 0 or np.mean(values) == 0:
        return None
    # Sum |x_i - x_j| over each unordered pair exactly once.
    pairwise_total = sum(
        np.sum(np.abs(values[idx] - values[idx + 1 :]))
        for idx in range(len(values) - 1)
    )
    return pairwise_total / (len(values) ** 2 * np.mean(values))
plots.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/
2
+ # All rights reserved.
3
+
4
+ import os
5
+
6
+ import numpy as np
7
+ import pandas as pd
8
+ import seaborn as sns
9
+ from matplotlib import pyplot as plt
10
+ from scipy.stats import gaussian_kde
11
+
12
dirname = os.path.dirname(__file__)

# Precomputed field-diversity scores for NLP papers (Git-LFS CSV).
papers_df = pd.read_csv(
    os.path.join(dirname, "data/nlp_papers_field_diversity.csv")
)

# Corpus-wide mean Citation Field Diversity Index.
mean_cfdi = papers_df["incoming_diversity"].mean()

# One mean citation age per paper, read from a plain-text file with one
# float per line.
with open(
    os.path.join(dirname, "data/nlp_papers_citation_age.txt"),
    "r",
    encoding="utf-8",
) as filehandle:
    # BUG FIX: the previous `float(line[:-1])` chopped the last character of
    # every line, which corrupts a final line lacking a trailing newline and
    # crashes on blank lines. float() tolerates surrounding whitespace.
    mean_citation_ages = [float(line) for line in filehandle if line.strip()]
34
+
35
+
36
def generate_cfdi_plot(input_cfdi):
    """Plot the corpus CFDI density and mark *input_cfdi* on the curve.

    Args:
        input_cfdi (float): CFDI value of the analyzed paper/author.

    Returns:
        matplotlib.figure.Figure: the rendered density figure.
    """
    sns.set(font_scale=1.3, style="whitegrid")

    # Kernel density estimate over all strictly-positive corpus CFDI values.
    data = papers_df[papers_df["incoming_diversity"] > 0]["incoming_diversity"]
    kde = gaussian_kde(data)
    x_vals = np.linspace(data.min(), data.max(), 1000)
    y_vals = kde.evaluate(x_vals)

    fig, ax = plt.subplots()  # create a new figure and axis

    ax.fill_between(x_vals, y_vals, color="skyblue", alpha=0.3)
    ax.plot(x_vals, y_vals, color="skyblue", linewidth=2, label="Distribution")

    # Mark the input value on the density curve with a star and a drop line.
    interpolated_y_cfdi = np.interp(input_cfdi, x_vals, y_vals)
    ax.scatter(
        input_cfdi,
        interpolated_y_cfdi,
        c="r",
        marker="*",
        linewidths=1,
        zorder=2,
    )
    ax.vlines(
        input_cfdi, 0, interpolated_y_cfdi, color="tomato", ls="--", lw=1.5
    )

    ax.set_xlabel("Citation Field Diversity Index (CFDI)", fontsize=15)
    ax.set_ylabel("Density", fontsize=15)
    sns.despine(left=True, bottom=True, right=True, top=True)

    return fig
79
+
80
+
81
def generate_maoc_plot(input_maoc):
    """Plot the corpus mean-age-of-citation density and mark *input_maoc*.

    Args:
        input_maoc (float): mean age of citation of the analyzed paper/author.

    Returns:
        matplotlib.figure.Figure: the rendered density figure.
    """
    sns.set(font_scale=1.3, style="whitegrid")

    # Kernel density estimate over the corpus-wide mean citation ages.
    data = pd.DataFrame(mean_citation_ages)[0]
    kde = gaussian_kde(data)
    x_vals = np.linspace(data.min(), data.max(), 1000)
    y_vals = kde.evaluate(x_vals)

    fig, ax = plt.subplots()  # create a new figure and axis
    ax.fill_between(x_vals, y_vals, color="skyblue", alpha=0.3)
    ax.plot(x_vals, y_vals, color="skyblue", linewidth=2, label="Distribution")

    # Mark the input value on the density curve with a star and a drop line.
    interpolated_y_maoc = np.interp(input_maoc, x_vals, y_vals)
    ax.scatter(
        input_maoc,
        interpolated_y_maoc,
        c="r",
        marker="*",
        linewidths=1,
        zorder=2,
    )
    ax.vlines(
        input_maoc, 0, interpolated_y_maoc, color="tomato", ls="--", lw=1.5
    )

    ax.set_xlabel("Mean Age of Citation (mAoC)", fontsize=15)
    ax.set_ylabel("Density", fontsize=15)
    sns.despine(left=True, bottom=True, right=True, top=True)

    return fig
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ gradio==3.44.3
2
+ numpy==1.25.2
3
+ matplotlib==3.8.0
4
+ requests==2.31.0
5
+ # futures  # removed from install set: this is the Python 2 backport of concurrent.futures, which is part of the Python 3 standard library and fails to install on modern interpreters
6
+ seaborn==0.12.2
7
+ scipy==1.11.2
8
+ beautifulsoup4==4.12.2
9
+ aiohttp==3.8.5
10
+ # asyncio  # removed from install set: asyncio is part of the Python 3 standard library; the PyPI package of the same name is a stale snapshot
s2.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2023 by Jan Philip Wahle, https://jpwahle.com/
2
+ # All rights reserved.
3
+
4
+ import asyncio
5
+ import os
6
+ from collections import Counter
7
+ from concurrent.futures import ThreadPoolExecutor, as_completed
8
+
9
+ import requests
10
+
11
+ from aclanthology import (
12
+ async_match_acl_id_to_s2_paper,
13
+ extract_author_info,
14
+ extract_paper_info,
15
+ extract_venue_info,
16
+ )
17
+ from metrics import calculate_gini, calculate_gini_simpson
18
+
19
+
20
def get_or_create_eventloop():
    """Return the current asyncio event loop, creating one if necessary.

    Worker threads (e.g. Gradio callbacks) have no event loop by default;
    in that case a fresh loop is created and installed for the thread.

    Returns:
        asyncio.AbstractEventLoop: a usable event loop.
    """
    try:
        return asyncio.get_event_loop()
    except RuntimeError:
        # BUG FIX: the previous version matched the exact exception message
        # "There is no current event loop in thread" and silently returned
        # None for any other RuntimeError (messages vary across Python
        # versions). Any RuntimeError here means no usable loop exists, so
        # create and register one.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        return loop
28
+
29
+
30
def send_s2_request(request_url):
    """Issue an authenticated GET request against the Semantic Scholar API.

    Args:
        request_url (str): fully-formed S2 API URL.

    Returns:
        requests.Response: the raw response (status is NOT checked here).

    Raises:
        KeyError: if the ``s2apikey`` environment variable is unset.
    """
    auth_headers = {"x-api-key": os.environ["s2apikey"]}
    return requests.get(request_url, headers=auth_headers, timeout=10)
45
+
46
+
47
def check_s2_id_type(semantic_scholar_id):
    """
    Check whether a given Semantic Scholar ID is valid for a paper or an author.

    Args:
        semantic_scholar_id (str): The Semantic Scholar ID to check.

    Returns:
        tuple: ("paper", None) when the ID resolves to a paper;
        ("author", <author name>) when it resolves to an author;
        ("author", "invalid") when it resolves to neither.
    """
    base_url = "https://api.semanticscholar.org/v1/"

    # Try the ID as a paper first.
    paper_response = requests.get(
        f"{base_url}paper/{semantic_scholar_id}", timeout=5
    )
    if paper_response.status_code == 200:
        return "paper", None

    # Otherwise try it as an author.
    author_response = requests.get(
        f"{base_url}author/{semantic_scholar_id}", timeout=5
    )
    if author_response.status_code == 200:
        return "author", author_response.json()["name"]

    # Neither lookup succeeded; callers treat "invalid" as a sentinel name.
    return "author", "invalid"
83
+
84
+
85
def get_papers_from_author(ssid_author_id):
    """Retrieve all paper IDs for a given author.

    Args:
        ssid_author_id (str): Semantic Scholar author ID.

    Returns:
        list: the author's paper IDs, or an empty list on request failure.
    """
    request_url = (
        "https://api.semanticscholar.org/graph/v1/author/"
        f"{ssid_author_id}?fields=papers"
    )
    response = send_s2_request(request_url)
    if response.status_code != 200:
        return []
    return [entry["paperId"] for entry in response.json().get("papers", [])]
102
+
103
+
104
def compute_stats_for_s2_paper(ssid_paper_id):
    """
    Computes statistics for a given paper ID using the Semantic Scholar API.

    Args:
        ssid_paper_id (str): The Semantic Scholar ID of the paper to compute
            statistics for.

    Returns:
        tuple: seven values -
            - title_authors (str): The title and authors of the paper.
            - num_references (int): The number of references in the paper.
            - fields_of_study_counts (dict): Count of each field of study in
              the paper's references (excluding Computer Science).
            - year_to_title_dict (dict): Year of each reference -> its title.
            - cfdi (float): Citation Field Diversity Index of the references.
            - cadi (float): Citation Age Diversity Index of the references.
            - output_maoc (float): Mean Age of Citation of the references.
        On any failure (request error, no references, no usable years) a
        tuple of seven ``None`` values is returned.
    """
    # BUG FIX: failure paths used to be inconsistent -- a non-200 response
    # returned an implicit None and a missing-references paper returned an
    # 8-tuple, while success returns 7 values. Callers splat the result into
    # the 7-parameter plot_and_return_stats, so both old failure shapes
    # crashed. All paths now return exactly seven values.
    empty_result = (None,) * 7

    # Fetch the paper with its reference list and basic metadata.
    request_url = (
        f"https://api.semanticscholar.org/graph/v1/paper/{ssid_paper_id}"
        "?fields=references,title,year,authors"
    )
    r = send_s2_request(request_url)
    if r.status_code != 200:
        return empty_result

    result = r.json()
    if not result.get("references"):
        return empty_result

    # Some references come back without a paperId; drop those.
    filtered_s2_ref_paper_keys = [
        ref["paperId"] for ref in result["references"] if ref["paperId"] is not None
    ]

    title, year, authors = result["title"], result["year"], result["authors"]
    title_authors = (
        title + "\n" + ", ".join(author["name"] for author in authors)
    )

    # Fetch every reference's title, year, and fields of study concurrently.
    reference_year_list = []
    reference_title_list = []
    reference_fos_list = []
    with ThreadPoolExecutor() as executor:
        request_url_refs = [
            f"https://api.semanticscholar.org/graph/v1/paper/{ref_paper_key}?fields=title,year,s2FieldsOfStudy"
            for ref_paper_key in filtered_s2_ref_paper_keys
        ]
        futures = [
            executor.submit(send_s2_request, request_url_ref)
            for request_url_ref in request_url_refs
        ]
        for future in as_completed(futures):
            r_ref = future.result()
            if r_ref.status_code != 200:
                # Best effort: skip references that fail to resolve.
                print(
                    f"Error retrieving reference {r_ref.status_code} for"
                    f" paper {ssid_paper_id}"
                )
                continue
            result_ref = r_ref.json()
            reference_year_list.append(result_ref["year"])
            reference_title_list.append(result_ref["title"])
            # Keep only the model-predicted field labels.
            reference_fos_list.extend(
                field["category"]
                for field in result_ref["s2FieldsOfStudy"]
                if field["source"] == "s2-fos-model"
            )

    # Drop references with missing year or title.
    reference_year_list = [y for y in reference_year_list if y is not None]
    reference_title_list = [t for t in reference_title_list if t is not None]

    num_references = len(reference_year_list)

    # Field histogram, excluding the (dominant) Computer Science label.
    fields_of_study_counts = dict(
        Counter(
            field
            for field in reference_fos_list
            if "Computer Science" not in field
        )
    )

    # Citation ages relative to the citing paper's year.
    aoc_list = [
        year - year_ref for year_ref in reference_year_list if year_ref and year
    ]
    if not aoc_list:
        return empty_result

    # Mean age of citation and its Gini-based diversity index.
    output_maoc = sum(aoc_list) / len(aoc_list)
    cadi = calculate_gini(aoc_list)

    # One representative title per year (later titles overwrite earlier ones).
    year_to_title_dict = dict(zip(reference_year_list, reference_title_list))

    # Field diversity via the Gini-Simpson index.
    cfdi = calculate_gini_simpson(fields_of_study_counts)

    return (
        title_authors,
        num_references,
        fields_of_study_counts,
        year_to_title_dict,
        cfdi,
        cadi,
        output_maoc,
    )
238
+
239
+
240
def compute_stats_for_s2_author(ssid_author_id, author_name):
    """Compute aggregated reference statistics for a Semantic Scholar author.

    Args:
        ssid_author_id (str): The Semantic Scholar author ID.
        author_name (str): Display name used to label the aggregated result.

    Returns:
        The aggregate produced by ``compute_stats_for_multiple_s2_papers``
        for all of the author's papers, or ``None`` when no papers are found.
    """
    papers = get_papers_from_author(ssid_author_id)
    if not papers:
        return None
    return compute_stats_for_multiple_s2_papers(papers, author_name)
254
+
255
+
256
def compute_stats_for_acl_paper(url):
    """Compute reference statistics for a paper given its ACL Anthology URL.

    Args:
        url (str): The URL of the paper on the ACL Anthology website.

    Returns:
        The statistics produced by ``compute_stats_for_s2_paper``, or ``None``
        when the paper cannot be found or matched to a Semantic Scholar entry.
    """
    if paper_info := extract_paper_info(url):
        loop = get_or_create_eventloop()
        # Match the ACL ID to a Semantic Scholar record.
        s2_paper = loop.run_until_complete(
            async_match_acl_id_to_s2_paper(paper_info["acl_id"])
        )
        # Guard against a failed match: the sibling ACL-author/venue paths
        # already check for "paperId" before use; be consistent here instead
        # of raising on a missing key.
        if not s2_paper or "paperId" not in s2_paper:
            return None
        return compute_stats_for_s2_paper(s2_paper["paperId"])
    return None
274
+
275
+
276
# NOTE(review): redundant import — asyncio is already imported at the top of
# this file; safe to keep (Python caches modules) but should be removed.
import asyncio
277
+
278
+
279
def compute_stats_for_acl_author(url):
    """Aggregate reference statistics over all papers of an ACL Anthology author.

    Args:
        url (str): The URL of the author's page on the ACL Anthology website.

    Returns:
        The aggregate produced by ``compute_stats_for_multiple_s2_papers``,
        labelled with the author's name, or ``None`` when the author page
        cannot be accessed or yields no papers.
    """
    paper_info = extract_author_info(url)
    if not paper_info:
        return None
    loop = get_or_create_eventloop()
    # The ACL paper ID is the second-to-last path segment of each paper URL.
    lookups = [
        async_match_acl_id_to_s2_paper(entry["url"].split("/")[-2])
        for entry in paper_info["papers"]
    ]
    matched = loop.run_until_complete(asyncio.gather(*lookups))
    # Keep only papers that were successfully matched to a Semantic Scholar ID.
    s2_ids = [m["paperId"] for m in matched if "paperId" in m]
    return compute_stats_for_multiple_s2_papers(s2_ids, paper_info["author"])
303
+
304
+
305
def compute_stats_for_acl_venue(url):
    """Aggregate reference statistics over all papers of an ACL Anthology venue.

    Args:
        url (str): The URL of the venue page on the ACL Anthology website.

    Returns:
        The aggregate produced by ``compute_stats_for_multiple_s2_papers``,
        labelled with the venue name, or ``None`` when the venue page yields
        no papers.
    """
    venue_info = extract_venue_info(url)
    if not venue_info:
        return None
    loop = get_or_create_eventloop()
    # The ACL paper ID is the second-to-last path segment of each paper URL.
    lookups = [
        async_match_acl_id_to_s2_paper(entry["url"].split("/")[-2])
        for entry in venue_info["papers"]
    ]
    matched = loop.run_until_complete(asyncio.gather(*lookups))
    # Keep only papers that were successfully matched to a Semantic Scholar ID.
    s2_ids = [m["paperId"] for m in matched if "paperId" in m]
    return compute_stats_for_multiple_s2_papers(s2_ids, venue_info["venue"])
318
+
319
+
320
def compute_stats_for_multiple_s2_papers(papers, title):
    """Aggregate reference statistics over several Semantic Scholar papers.

    Args:
        papers (list): Semantic Scholar paper IDs to process.
        title (str): Label (author, venue, or paper name) attached to the
            returned aggregate.

    Returns:
        tuple: ``(title, total reference count, field-of-study counts,
        year -> reference-title mapping, mean CFDI, mean CADI, mean mAoC)``.
        For an empty ``papers`` list all aggregates are zero/empty.
    """
    # Guard: the original divided by len(papers) unconditionally, raising
    # ZeroDivisionError for an empty input.
    if not papers:
        return title, 0, {}, {}, 0, 0, 0

    num_references = 0
    top_fields = {}
    oldest_paper_dict = {}
    cfdi = 0
    cadi = 0
    output_maoc = 0

    # Fetch per-paper statistics concurrently (each call is network-bound).
    with ThreadPoolExecutor() as executor:
        results_list = list(executor.map(compute_stats_for_s2_paper, papers))

    for results in results_list:
        # Skip papers for which no statistics could be computed.
        if not results or results[0] is None:
            continue
        num_references += results[1]
        for field, count in results[2].items():
            top_fields[field] = top_fields.get(field, 0) + count
        # Bug fix: the original iterated `for year, title in ...`, shadowing
        # the `title` parameter and returning the last reference title
        # instead of the requested label.
        for ref_year, ref_title in results[3].items():
            oldest_paper_dict[ref_year] = ref_title
        cfdi += results[4]
        cadi += results[5]
        output_maoc += results[6]

    # NOTE(review): averages are taken over all requested papers, including
    # ones skipped above — preserved from the original; confirm intended.
    return (
        title,
        num_references,
        top_fields,
        oldest_paper_dict,
        cfdi / len(papers),
        cadi / len(papers),
        output_maoc / len(papers),
    )