# pip install sparqlwrapper
# https://rdflib.github.io/sparqlwrapper/
import wikipediaapi
import urllib.parse
import sys
from SPARQLWrapper import SPARQLWrapper, JSON
import json
from tqdm import tqdm
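# Wikidata SPARQL query: items with a referenced ICD-10 statement (P4229),
# their English Wikipedia article (if any), and the ICD-10 code itself.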
endpoint_url = "https://query.wikidata.org/sparql"
query = """SELECT DISTINCT ?item ?itemLabel ?article ?icd10code WHERE {
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE]". }
{
SELECT DISTINCT ?item ?article ?icd10code WHERE {
?item p:P4229 ?statement0.
?statement0 (ps:P4229) _:anyValueP4229.
FILTER(EXISTS { ?statement0 prov:wasDerivedFrom ?reference. })
OPTIONAL {
?article schema:about ?item .
?article schema:inLanguage "en" .
FILTER (SUBSTR(str(?article), 1, 25) = "https://en.wikipedia.org/")
}
?item wdt:P4229 ?icd10code.
}
}
}
"""
def get_results(endpoint_url, query):
    user_agent = "WDQS-example Python/%s.%s" % (sys.version_info[0], sys.version_info[1])
    # TODO adjust user agent; see https://w.wiki/CX6
    sparql = SPARQLWrapper(endpoint_url, agent=user_agent)
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    return sparql.query().convert()
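# Run the query once against the Wikidata Query Service; each binding is one
# (item, article, ICD-10 code) row.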
results = get_results(endpoint_url, query)
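# Deduplicate Wikipedia URLs and collect one article dict per page title.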
parsed_urls = set()
wiki_wiki = wikipediaapi.Wikipedia('MyProjectName (merlin@example.com)', 'en')  # TODO adjust user agent for the Wikipedia API
parsed_wikipedia = {}
article_to_icd_mapping = {}
sections_to_ignore = ["See also", "Explanatory notes", "External links", "References", "Citations", "Cited texts", "Further reading"]
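# For every result row: download the Wikipedia page, keep its summary and its
# sections (minus the navigational ones listed above), and attach all ICD-10
# codes that point to the same article.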
for result in tqdm(results["results"]["bindings"]):
    if "article" not in result:
        continue
    wikipedia_url = result["article"]["value"]
    icd_code = result["icd10code"]["value"]
    qid = result["item"]["value"].split("/")[-1]  # Wikidata QID (currently unused)
    entity_name = wikipedia_url.split("/")[-1]
    entity_name = urllib.parse.unquote(entity_name)
    if wikipedia_url not in parsed_urls:
        page = wiki_wiki.page(entity_name)
        if not page.exists():
            continue
        if wikipedia_url == page.fullurl:
            parsed_urls.add(wikipedia_url)
            # Keep the article body minus navigational/reference sections.
            sections = list(filter(lambda s: s.title not in sections_to_ignore, page.sections))
            sections = [{"title": s.title, "text": s.full_text()} for s in sections]
            article = {"title": entity_name, "icd_code": [icd_code], "sections": sections, "summary": page.summary}
            parsed_wikipedia[entity_name] = article
    else:
        # Article already scraped: just record any additional ICD-10 code.
        article = parsed_wikipedia[entity_name]
        if icd_code not in article["icd_code"]:
            article["icd_code"].append(icd_code)
        parsed_wikipedia[entity_name] = article
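# Persist the scraped articles as a single JSON file keyed by page title.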
with open("wiki_icd_10.json", "w+") as f:
    json.dump(parsed_wikipedia, f)
print("DONE")