import bs4, re, bz2, shutil
from urllib.request import urlretrieve

import pandas as pd


# Download the latest German Wikiquote dump and decompress it next to the archive
# (the decompressed file keeps the archive name minus the ".bz2" suffix).
filename = "dewikiquote-latest-pages-articles.xml.bz2"
urlretrieve("https://dumps.wikimedia.org/dewikiquote/latest/" + filename, filename)

with bz2.BZ2File(filename) as fr, open(filename[:-4], "wb") as fw:
    shutil.copyfileobj(fr, fw)

# Parse the decompressed dump. BeautifulSoup's "xml" parser requires the lxml package.
with open("dewikiquote-latest-pages-articles.xml") as fp:
    soup = bs4.BeautifulSoup(fp, "xml")
    pages = soup.mediawiki.find_all("page")

def get_quotes(text: str) -> list[str]:
    """Extract all quotes from the wikitext of a single page."""
    res = []
    for line in text.split("\n"):
        stripped = line.strip()
        # Everything below this heading is quotes about the person by others,
        # not quotes by the person, so stop collecting here.
        if "zitate mit bezug auf" in stripped.lower():
            return res
        # Quotes are bullet points of the form: * "..."
        match = re.search(r'\*\s*("[^"]+")', stripped)
        if match:
            quote = match.group(1)
            # [[target|label]] wiki links -> keep only the label.
            cleaned = re.sub(r'\[\[[^\[]+\]\]', lambda x: x.group()[2:].split("|")[-1][:-2], quote)
            # {{template|content}} -> keep only the content.
            cleaned = re.sub(r'{{[^{}\|]+\|([^{}]+)}}', lambda x: x.group(1), cleaned)
            # Strip HTML tags, stray "//" and repeated spaces.
            cleaned = re.sub(r'<[^<>]+>', "", cleaned)
            cleaned = cleaned.replace("//", "")
            cleaned = re.sub(' +', ' ', cleaned)
            # Skip quotes that still contain URLs and trivially short matches.
            if "http" not in cleaned and len(cleaned) > 5:
                res.append(cleaned)
    return res
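
# Quick sanity check of the extraction rules above; the wikitext bullet is a made-up
# example, not a line taken from the dump.
assert get_quotes('* "Der Weg ist das [[Ziel]]."') == ['"Der Weg ist das Ziel."']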

def get_categories(text: str) -> list[str]:
    """Return the names of all [[Kategorie:...]] links on a page."""
    return re.findall(r"\[\[Kategorie:([^\]|]+)[^]]*\]\]", text)
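
# Likewise for get_categories(); both plain and piped category links resolve to the
# bare category name (again a made-up snippet, not taken from the dump).
assert get_categories("[[Kategorie:Person]] [[Kategorie:Dichter|x]]") == ["Person", "Dichter"]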

# Walk over all pages, keeping only those categorised as "Person"; the page title is
# the author name. Titles of skipped pages go into `omitted` for later inspection.
data = []
omitted = set()
for page in pages:
    author = page.title.text
    raw_text = page.find("text").text
    categories = get_categories(raw_text)
    if "Person" in categories:
        quotes = get_quotes(raw_text)
        for quote in quotes:
            data.append((author, quote))
    else:
        omitted.add(author)

# Deduplicate and write the (author, quote) pairs to disk.
df = pd.DataFrame(data, columns=["author", "quote"]).drop_duplicates()
df.to_csv("train.csv")
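# Note: to_csv() also writes the DataFrame index as an unnamed first column by default;
# pass index=False if train.csv should contain only the author and quote columns.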