costrau committed on
Commit 2510105
1 Parent(s): 1e4b1dc

Add script for crawling quotes

Files changed (1)
  1. CrawlingQuotes.py +70 -0
CrawlingQuotes.py ADDED
@@ -0,0 +1,70 @@
+ import bs4, re, bz2, shutil
+ from urllib.request import urlretrieve
+ import pandas as pd
+
+ # Download and unpack the latest German Wikiquote dump
+ filename = "dewikiquote-latest-pages-articles.xml.bz2"
+ urlretrieve("https://dumps.wikimedia.org/dewikiquote/latest/" + filename, filename)
+ with bz2.BZ2File(filename) as fr, open(filename[:-4], "wb") as fw:
+     shutil.copyfileobj(fr, fw)
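+ # Note: copyfileobj streams the decompression in chunks, so the full dump is
+ # never held in memory; filename[:-4] is the same path without the ".bz2" suffix.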
+
+ # Open the decompressed dump and parse it as XML
+ with open("dewikiquote-latest-pages-articles.xml") as fp:
+     soup = bs4.BeautifulSoup(fp, "xml")
+     pages = soup.mediawiki.findAll('page')
+
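+ # Each <page> element carries a <title> (the article/author name) and a nested
+ # <text> node with the raw wikitext; the extraction loop below reads both.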
+
+ # Return all quotes on a single page
+ def get_quotes(text: str) -> list[str]:
+     res = []
+     # usually a quote sits on a single line
+     for line in text.split("\n"):
+         # remove leading and trailing whitespace
+         stripped = line.strip()
+         # quotes listed under "Zitate mit Bezug auf ..." are not by the current
+         # author, so stop there (the section usually sits at the bottom)
+         if "zitate mit bezug auf" in stripped.lower():
+             return res
+         match = re.search(r'\*\s*("[^"]+")', stripped)
+         if match:
+             quote = match.group(1)
+             # [[link|label]] -> label
+             cleaned = re.sub(r'\[\[[^\[]+\]\]', lambda x: x.group()[2:].split("|")[-1][:-2], quote)
+             # {{template|text}} -> text
+             cleaned = re.sub(r'{{[^{}\|]+\|([^{}]+)}}', lambda x: x.group(1), cleaned)
+             # drop anything in angle brackets (HTML-like tags)
+             cleaned = re.sub(r'<[^<>]+>', "", cleaned)
+             cleaned = cleaned.replace("//", "")  # remove // markers
+             cleaned = re.sub(' +', ' ', cleaned)  # collapse repeated spaces
+             if "http" not in cleaned and len(cleaned) > 5:
+                 res.append(cleaned)
+     return res
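+ # Illustrative example (assuming typical de.wikiquote markup):
+ #   get_quotes('* "Der Weg ist das Ziel." - Konfuzius')
+ #   -> ['"Der Weg ist das Ziel."']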
+
+ # Get the categories a page belongs to
+ def get_categories(text: str) -> list[str]:
+     return re.findall(r"\[\[Kategorie:([^\]|]+)[^]]*\]\]", text)
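+ # Illustrative example:
+ #   get_categories("[[Kategorie:Person]] [[Kategorie:Deutschland|!]]")
+ #   -> ['Person', 'Deutschland']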
+
+ # def get_movie_quotes(text: str):
+ #     match = re.search(r"== Zitate ==(.*)== Dialoge ==", text, flags=re.DOTALL)
+ #     if match:
+ #         segment = match.group(1)
+ #         match = re.findall(r"=== ([^=]+) === ([^])", segment)
+ #     return []
+
+ # Extract quotes and authors
+ data = []
+ omitted = set()
+ for page in pages:
+     author = page.title.text
+     raw_text = page.find("text").text
+     categories = get_categories(raw_text)
+     if "Person" in categories:
+         quotes = get_quotes(raw_text)
+         for quote in quotes:
+             data.append((author, quote))
+     # elif "== Filminfo" in raw_text:
+     #     fiction = get_movie_quotes(raw_text)
+
+     #     break
+     else:
+         omitted.add(author)
+
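+ # Pages without the "Person" category (e.g. film or topic pages) are only
+ # recorded in `omitted`; the commented-out "Filminfo" branch above sketches
+ # how movie quotes could be handled later.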
+ # Save results to CSV
+ df = pd.DataFrame(data, columns=["author", "quote"]).drop_duplicates()
+ df.to_csv("train.csv")
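+ # Note: by default to_csv also writes the DataFrame index as an unnamed first
+ # column; pass index=False if only the author/quote columns are wanted.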