Modalities: Text
Formats: parquet
Libraries: Datasets, pandas
Commit da1a91a by bjoernp (parent: 9328d16)

Upload 2 files

Files changed (2):
  1. wikitext.py +47 -0
  2. wikitext_en.py +36 -0
wikitext.py ADDED
@@ -0,0 +1,47 @@
+ from bs4 import BeautifulSoup
+ from datasets import load_dataset
+
+ def get_titles(file_path):
+     # Get the article titles from an HTML file saved from https://de.wikipedia.org/wiki/Wikipedia:Exzellente_Artikel
+     with open(file_path, 'r') as f:
+         html_content = f.read()
+     soup = BeautifulSoup(html_content, 'html.parser')
+     # Find the <tbody> element (adjust this to match your HTML structure)
+     tbody = soup.find('tbody')
+     # Extract all <tr> elements within the <tbody>
+     trs = tbody.find_all('tr')
+     trs = trs[2:]  # skip the first two header rows
+     # Collect the title attribute of every <a> tag in the remaining rows
+     titles = []
+     for tr in trs:
+         if tr is None or tr.find('a') is None:
+             continue
+         a_tags = tr.find_all('a')
+         for a_tag in a_tags:
+             if a_tag and 'title' in a_tag.attrs:
+                 titles.append(a_tag['title'])
+     return titles
+
+ if __name__ == '__main__':
+     titles_exzellent = get_titles('exzellent.txt')
+     #titles_lesenswert = get_titles('lesenswert.txt')
+     titles = titles_exzellent #+ titles_lesenswert
+     titles = list(set(titles))
+     with open('titles.txt', 'w') as f:
+         for title in titles:
+             f.write(title + '\n')
+
+     # Load the German Wikipedia dump
+     dataset = load_dataset("graelo/wikipedia", "20230901.de", split="train")
+     # Keep only articles whose title is on the "Exzellente Artikel" list
+     dataset = dataset.filter(lambda example: example['title'] in titles, num_proc=64)
+     # Report which titles were matched before the title column is dropped
+     used_title = [example['title'] for example in dataset]
+     non_used_title = [title for title in titles if title not in used_title]
+     print(f'Number of used titles: {len(used_title)}')
+     print(f'Number of non-used titles: {len(non_used_title)}')
+     print(non_used_title[:20])
+
+     # Prepend the title as a heading, drop the title column, and upload
+     dataset = dataset.map(lambda x: {'text': f"# {x['title']}\n\n{x['text']}"}, remove_columns=['title'], num_proc=64)
+     dataset.push_to_hub("LeoLM/wiki_de_exzellent", private=True)
wikitext_en.py ADDED
@@ -0,0 +1,36 @@
+ from bs4 import BeautifulSoup
+ from datasets import load_dataset
+
+ def get_titles(file_path):
+     # Get the article titles from an HTML file saved from https://en.wikipedia.org/wiki/Wikipedia:Featured_articles
+     with open(file_path, 'r') as f:
+         html_content = f.read()
+     soup = BeautifulSoup(html_content, 'html.parser')
+     # Collect the title attribute of every <a> tag inside the featured-article content divs
+     divs = soup.find_all('div', attrs={'class': 'wp-fa-contents'})
+     titles = []
+     for d in divs:
+         if d is None or d.find('a') is None:
+             continue
+         a_tags = d.find_all('a')
+         for a_tag in a_tags:
+             if a_tag and 'title' in a_tag.attrs:
+                 titles.append(a_tag['title'])
+     return titles
+
+ if __name__ == '__main__':
+     titles = get_titles('featured.html')
+     titles = list(set(titles))
+
+     # Load the English Wikipedia dump and keep only articles on the Featured Articles list
+     dataset = load_dataset("graelo/wikipedia", "20230901.en", split="train")
+     dataset = dataset.filter(lambda example: example['title'] in titles, num_proc=64)
+     # Report which titles were matched before the title column is dropped
+     used_title = [example['title'] for example in dataset]
+     non_used_title = [title for title in titles if title not in used_title]
+     print(f'Number of used titles: {len(used_title)}')
+     print(f'Number of non-used titles: {len(non_used_title)}')
+     print(non_used_title[:20])
+     # Prepend the title as a heading, drop the title column, and upload
+     dataset = dataset.map(lambda x: {'text': f"# {x['title']}\n\n{x['text']}"}, remove_columns=['title'], num_proc=64)
+     dataset.push_to_hub("LeoLM/wiki_en_featured", private=True)