Modalities: Text
Formats: json
Size: < 1K
Libraries: Datasets, pandas
pgrundmann committed
Commit 97f14d3
1 Parent(s): c8ee4ad

Upload 2 files

Files changed (3):
  1. .gitattributes +1 -0
  2. scrape_icd_10.py +76 -0
  3. wiki_icd_10.json +3 -0
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ wiki_icd_10.json filter=lfs diff=lfs merge=lfs -text
scrape_icd_10.py ADDED
@@ -0,0 +1,76 @@
+ # pip install sparqlwrapper wikipedia-api tqdm
+ # https://rdflib.github.io/sparqlwrapper/
+ import wikipediaapi
+ import urllib.parse
+ import sys
+ from SPARQLWrapper import SPARQLWrapper, JSON
+ import json
+ from tqdm import tqdm
+
+ endpoint_url = "https://query.wikidata.org/sparql"
+
+ # Wikidata: all items with a referenced ICD-10 code (P4229), plus the linked
+ # English Wikipedia article where one exists.
+ query = """SELECT DISTINCT ?item ?itemLabel ?article ?icd10code WHERE {
+   SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE]". }
+   {
+     SELECT DISTINCT ?item ?article ?icd10code WHERE {
+       ?item p:P4229 ?statement0.
+       ?statement0 (ps:P4229) _:anyValueP4229.
+       FILTER(EXISTS { ?statement0 prov:wasDerivedFrom ?reference. })
+       OPTIONAL {
+         ?article schema:about ?item .
+         ?article schema:inLanguage "en" .
+         FILTER (SUBSTR(str(?article), 1, 25) = "https://en.wikipedia.org/")
+       }
+       ?item wdt:P4229 ?icd10code.
+     }
+   }
+ }
+ """
+
+
+ def get_results(endpoint_url, query):
+     user_agent = "WDQS-example Python/%s.%s" % (sys.version_info[0], sys.version_info[1])
+     # TODO adjust user agent; see https://w.wiki/CX6
+     sparql = SPARQLWrapper(endpoint_url, agent=user_agent)
+     sparql.setQuery(query)
+     sparql.setReturnFormat(JSON)
+     return sparql.query().convert()
+
+
+ results = get_results(endpoint_url, query)
+
+ parsed_urls = set()
+
+ wiki_wiki = wikipediaapi.Wikipedia('MyProjectName (merlin@example.com)', 'en')
+
+ parsed_wikipedia = {}
+ article_to_icd_mapping = {}  # declared but not used below
+ sections_to_ignore = ["See also", "Explanatory notes", "External links", "References", "Citations", "Cited texts", "Further reading"]
+
+ for result in tqdm(results["results"]["bindings"]):
+     if "article" not in result:
+         continue
+     wikipedia_url = result["article"]["value"]
+     icd_code = result["icd10code"]["value"]
+     item_label = result["itemLabel"]["value"]  # human-readable label; not used below
+     entity_name = wikipedia_url.split("/")[-1]
+     entity_name = urllib.parse.unquote(entity_name)
+
+     if wikipedia_url not in parsed_urls:
+         page = wiki_wiki.page(entity_name)
+         if not page.exists():
+             continue
+         if wikipedia_url == page.fullurl:
+             parsed_urls.add(wikipedia_url)
+             # Keep every section except navigation/reference sections.
+             sections = list(filter(lambda s: s.title not in sections_to_ignore, page.sections))
+             sections = [{"title": s.title, "text": s.full_text()} for s in sections]
+             article = {"title": entity_name, "icd_code": [icd_code], "sections": sections, "summary": page.summary}
+             parsed_wikipedia[entity_name] = article
+     else:
+         # Article already scraped: just record the additional ICD-10 code.
+         article = parsed_wikipedia[entity_name]
+         if icd_code not in article["icd_code"]:
+             article["icd_code"].append(icd_code)
+             parsed_wikipedia[entity_name] = article
+
+ with open("wiki_icd_10.json", "w+") as f:
+     json.dump(parsed_wikipedia, f)
+ print("DONE")
wiki_icd_10.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9b9bb1fadd1d7eaaf63712b5c955aa6bab885386f25c4da4edab35bfc24ef54
+ size 28941243
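
wiki_icd_10.json itself is stored via Git LFS, so the committed file is only this pointer; the roughly 29 MB JSON blob is fetched from LFS storage on checkout. A small sketch (assuming git lfs pull has already replaced the pointer with the real file in the working directory) to confirm that the local copy matches the recorded oid and size:

import hashlib
import os

EXPECTED_OID = "c9b9bb1fadd1d7eaaf63712b5c955aa6bab885386f25c4da4edab35bfc24ef54"
EXPECTED_SIZE = 28941243
path = "wiki_icd_10.json"  # assumes the LFS object has been pulled locally

sha256 = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        sha256.update(chunk)

assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"
assert sha256.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("local wiki_icd_10.json matches the LFS pointer")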