# pip install sparqlwrapper wikipedia-api tqdm
# https://rdflib.github.io/sparqlwrapper/
import wikipediaapi
import urllib.parse
import sys
from SPARQLWrapper import SPARQLWrapper, JSON
import json
from tqdm import tqdm
endpoint_url = "https://query.wikidata.org/sparql"
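# Public Wikidata Query Service endpoint; WDQS is rate-limited and expects a
# descriptive User-Agent (see the TODO in get_results below and https://w.wiki/CX6).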
query = """SELECT DISTINCT ?item ?itemLabel ?article ?icd9code WHERE {
SERVICE wikibase:label { bd:serviceParam wikibase:language "[AUTO_LANGUAGE]". }
{
SELECT DISTINCT ?item ?article ?icd9code WHERE {
?item p:P1692 ?statement0.
?statement0 (ps:P1692) _:anyValueP1692.
FILTER(EXISTS { ?statement0 prov:wasDerivedFrom ?reference. })
OPTIONAL {
?article schema:about ?item .
?article schema:inLanguage "en" .
FILTER (SUBSTR(str(?article), 1, 25) = "https://en.wikipedia.org/")
}
?item wdt:P1692 ?icd9code.
}
}
}
"""
def get_results(endpoint_url, query):
    user_agent = "WDQS-example Python/%s.%s" % (sys.version_info[0], sys.version_info[1])
    # TODO adjust user agent; see https://w.wiki/CX6
    sparql = SPARQLWrapper(endpoint_url, agent=user_agent)
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    return sparql.query().convert()
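# get_results() returns the standard SPARQL 1.1 JSON results document: each
# entry in results["results"]["bindings"] maps a variable name to a dict like
# {"type": "uri", "value": "..."}; optional variables (here ?article) are
# simply missing from a binding rather than bound to null.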
results = get_results(endpoint_url, query)

parsed_urls = set()  # Wikipedia URLs already fetched, so each page is downloaded once
wiki_wiki = wikipediaapi.Wikipedia('MyProjectName (merlin@example.com)', 'en')
parsed_wikipedia = {}  # page title -> article dict, dumped to JSON at the end
sections_to_ignore = ["See also", "Explanatory notes", "External links", "References", "Citations", "Cited texts", "Further reading"]
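# Each binding is one (item, ICD-9 code, article) row, so the same article can
# appear several times with different codes; parsed_urls deduplicates the
# downloads and the else branch below merges extra codes into the existing entry.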
for result in tqdm(results["results"]["bindings"]):
    if "article" not in result:
        continue  # item has no English Wikipedia article
    wikipedia_url = result["article"]["value"]
    icd_code = result["icd9code"]["value"]
    # Derive the page title from the URL (undo percent-encoding)
    entity_name = urllib.parse.unquote(wikipedia_url.split("/")[-1])
    if wikipedia_url not in parsed_urls:
        page = wiki_wiki.page(entity_name)
        if not page.exists():
            continue
        if wikipedia_url == page.fullurl:  # skip redirect/title mismatches
            parsed_urls.add(wikipedia_url)
            # Keep only content sections; drop navigation and reference material
            sections = [s for s in page.sections if s.title not in sections_to_ignore]
            sections = [{"title": s.title, "text": s.full_text()} for s in sections]
            article = {"title": entity_name, "icd_code": [icd_code], "sections": sections, "summary": page.summary}
            parsed_wikipedia[entity_name] = article
    else:
        # Article already fetched under another ICD-9 code: extend its code list
        article = parsed_wikipedia[entity_name]
        if icd_code not in article["icd_code"]:
            article["icd_code"].append(icd_code)
with open("wiki_icd_9.json", "w") as f:
    json.dump(parsed_wikipedia, f)

print("DONE")