pgrundmann committed
Commit
6b1649f
1 Parent(s): 2ffa111

Upload scrape_icd_9.py

Files changed (1)
  1. scrape_icd_9.py +76 -0
scrape_icd_9.py ADDED
@@ -0,0 +1,76 @@
+ # pip install sparqlwrapper
+ # https://rdflib.github.io/sparqlwrapper/
+ import wikipediaapi
+ import urllib.parse
+ import sys
+ from SPARQLWrapper import SPARQLWrapper, JSON
+ import json
+ from tqdm import tqdm
+
+ endpoint_url = "https://query.wikidata.org/sparql"
+
+ query = """SELECT DISTINCT ?item ?itemLabel ?article ?icd9code WHERE {
+   SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE]". }
+   {
+     SELECT DISTINCT ?item ?article ?icd9code WHERE {
+       ?item p:P1692 ?statement0.
+       ?statement0 (ps:P1692) _:anyValueP1692.
+       FILTER(EXISTS { ?statement0 prov:wasDerivedFrom ?reference. })
+       OPTIONAL {
+         ?article schema:about ?item .
+         ?article schema:inLanguage "en" .
+         FILTER (SUBSTR(str(?article), 1, 25) = "https://en.wikipedia.org/")
+       }
+       ?item wdt:P1692 ?icd9code.
+     }
+   }
+ }
+ """
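+ # The query selects every Wikidata item that carries a referenced
+ # ICD-9-CM statement (property P1692), together with the code itself and,
+ # via the OPTIONAL block, the English Wikipedia article about the item.
+ # Items without an English article still appear with "article" unbound,
+ # which is why the loop below skips rows lacking an "article" binding.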
+
+
+ def get_results(endpoint_url, query):
+     user_agent = "WDQS-example Python/%s.%s" % (sys.version_info[0], sys.version_info[1])
+     # TODO adjust user agent; see https://w.wiki/CX6
+     sparql = SPARQLWrapper(endpoint_url, agent=user_agent)
+     sparql.setQuery(query)
+     sparql.setReturnFormat(JSON)
+     return sparql.query().convert()
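+ # With JSON as the return format, convert() parses the response into the
+ # standard SPARQL results document: {"results": {"bindings": [...]}},
+ # where each binding maps a variable name to a {"type": ..., "value": ...} dict.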
+
+
+ results = get_results(endpoint_url, query)
+
+ parsed_urls = set()
+
+ wiki_wiki = wikipediaapi.Wikipedia('MyProjectName (merlin@example.com)', 'en')
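+ # Recent wikipedia-api releases take a descriptive user agent as the first
+ # argument and the language code as the second; Wikimedia asks clients to
+ # identify themselves with a proper user agent.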
+
+ parsed_wikipedia = {}
+ article_to_icd_mapping = {}  # currently unused
+ sections_to_ignore = ["See also", "Explanatory notes", "External links", "References", "Citations", "Cited texts", "Further reading"]
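+ # These sections hold references and navigation rather than article content,
+ # so they are dropped before the section texts are serialized below.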
+
+ for result in tqdm(results["results"]["bindings"]):
+     if "article" not in result:
+         continue
+     wikipedia_url = result["article"]["value"]
+     icd_code = result["icd9code"]["value"]
+     qid = result["itemLabel"]["value"]  # human-readable label of the item
+     entity_name = wikipedia_url.split("/")[-1]
+     entity_name = urllib.parse.unquote(entity_name)
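+     # The article title is the last path segment of the Wikipedia URL,
+     # percent-decoded (e.g. ".../Crohn%27s_disease" becomes "Crohn's_disease").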
+
+     # Fetch and parse each article only once; later rows for the same URL
+     # just contribute additional ICD-9 codes.
+     if wikipedia_url not in parsed_urls:
+         page = wiki_wiki.page(entity_name)
+         if not page.exists():
+             continue
+         # Only record the page when its canonical URL matches the Wikidata
+         # sitelink, so redirects do not attach text to the wrong title.
+         if wikipedia_url == page.fullurl:
+             parsed_urls.add(wikipedia_url)
+             sections = list(filter(lambda s: s.title not in sections_to_ignore, page.sections))
+             sections = [{"title": s.title, "text": s.full_text()} for s in sections]
+             article = {"title": entity_name, "icd_code": [icd_code], "sections": sections, "summary": page.summary}
+             parsed_wikipedia[entity_name] = article
+     else:
+         article = parsed_wikipedia[entity_name]
+         if icd_code not in article["icd_code"]:
+             article["icd_code"].append(icd_code)
+
+ with open("wiki_icd_9.json", "w") as f:
+     json.dump(parsed_wikipedia, f)
+ print("DONE")
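+ # The resulting wiki_icd_9.json maps each article title to {"title": ...,
+ # "icd_code": [...], "sections": [{"title": ..., "text": ...}, ...],
+ # "summary": ...}. A quick sanity check might look like this (hypothetical
+ # snippet, not part of the upload):
+ #   with open("wiki_icd_9.json") as f:
+ #       data = json.load(f)
+ #   print(len(data), next(iter(data.values()))["icd_code"])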