# wikipedia_fr/extract_wiki/1_extract_link.py
# -*- coding: utf-8 -*-
"""
Wikipedia URL Extractor:
Script to download the Wikipedia dataset from Hugging Face, extract URLs,
and save them to a text file for further processing.
Required:
pip install datasets
Using MIN_LENGTH = 1400 results in approximately 1,100,000 URLs.
Using MIN_LENGTH = 1000 results in approximately 1,800,000 URLs.
Author : Guillaume Eckendoerffer
Date : 14-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
https://huggingface.co/datasets/eckendoerffer/wikipedia_fr
"""
import os
from datasets import load_dataset
# Constants
MIN_LENGTH = 1400  # Minimum article text length, in characters, for its URL to be kept.
EXCLUDE_TITLE_PREFIXES = ['Liste ', 'Abréviations ']
EXCLUDE_TITLE_SUFFIXES = ['(homonymie)']  # French disambiguation titles end with this suffix.
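# Illustrative examples (assumed titles, not read from the dataset) of pages the
# filters above are meant to skip:
#   "Liste des communes de France"  -> starts with 'Liste '
#   "Paris (homonymie)"             -> ends with '(homonymie)'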
# File path configuration
PATH = os.path.dirname(os.path.abspath(__file__))
URL_FILEPATH = os.path.join(PATH, "wiki_link.txt")
# Resetting the output file
with open(URL_FILEPATH, 'w', encoding="utf8") as url_file:
    url_file.write("")
# Loading the dataset
dataset = load_dataset('wikipedia', '20220301.fr')
subset = dataset["train"]
add = 0

# Iterate over every article and append qualifying URLs to the output file.
# The file is opened once here rather than being reopened for every matching row.
with open(URL_FILEPATH, 'a', encoding="utf8") as url_file:
    for row in subset:
        text = row["text"]
        title = row["title"]
        url = row["url"]

        # Keep the URL only if the title is not excluded and the text is long enough
        if (not any(title.startswith(p) for p in EXCLUDE_TITLE_PREFIXES)
                and not any(title.endswith(s) for s in EXCLUDE_TITLE_SUFFIXES)
                and len(text) >= MIN_LENGTH):
            add += 1
            print(f"{add} : {len(text)} : {url} : {title}")
            url_file.write(f"{url.strip()}\n")
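
# A minimal sketch, assuming a later step of the pipeline wants the saved list back
# in memory for further processing (illustrative only; `urls` is not used elsewhere):
#
#   with open(URL_FILEPATH, encoding="utf8") as f:
#       urls = [line.strip() for line in f if line.strip()]
#   print(f"{len(urls)} URLs ready for further processing")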