wikitext-en-de / wikitext_en.py
from bs4 import BeautifulSoup
from datasets import load_dataset


def get_titles(file_path):
    # Extract article titles from a saved copy of
    # https://en.wikipedia.org/wiki/Wikipedia:Featured_articles
    with open(file_path, 'r') as f:
        html_content = f.read()
    soup = BeautifulSoup(html_content, 'html.parser')
    divs = soup.find_all('div', attrs={'class': 'wp-fa-contents'})
    titles = []
    for d in divs:
        if d is None or d.find('a') is None:
            continue
        for a_tag in d.find_all('a'):
            if a_tag and 'title' in a_tag.attrs:
                titles.append(a_tag['title'])
    return titles
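
# The parser above assumes markup of roughly this shape (a sketch inferred
# from the selectors used; the live Featured-articles page may differ):
#
#   <div class="wp-fa-contents">
#     ... <a href="/wiki/Example_article" title="Example article">Example article</a> ...
#   </div>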


if __name__ == '__main__':
    titles = get_titles('featured.html')
    titles = list(set(titles))

    # Load the full English Wikipedia dump.
    dataset = load_dataset("graelo/wikipedia", "20230901.en", split="train")

    # Keep only the articles whose title appears in the featured list.
    dataset = dataset.filter(lambda example: example['title'] in titles, num_proc=64)

    # Report how many featured titles were actually found in the dump.
    # This must happen before the map below, which removes the 'title' column.
    used_title = [example['title'] for example in dataset]
    non_used_title = [title for title in titles if title not in used_title]
    print(f'Number of used titles: {len(used_title)}')
    print(f'Number of non-used titles: {len(non_used_title)}')
    print(non_used_title[:20])

    # Prepend the title as a Markdown heading, then drop the title column.
    dataset = dataset.map(lambda x: {'text': f"# {x['title']}\n\n{x['text']}"},
                          remove_columns=['title'], num_proc=64)

    # Save dataset
    dataset.push_to_hub("LeoLM/wiki_en_featured", private=True)
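
# For reference, a minimal sketch of reading the result back (assumes the
# caller is authenticated for the private "LeoLM/wiki_en_featured" repo):
#
#   from datasets import load_dataset
#   ds = load_dataset("LeoLM/wiki_en_featured", split="train")
#   print(ds[0]['text'][:200])  # should start with the "# <title>" heading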