|
|
|
|
|
""" |
|
News article text extractor |
|
|
|
This script extracts the text from locally-stored news articles. The main goal is |
|
to retrieve clean text with minimal external elements, such as user menus, article lists, |
|
and advertisements. |
|
|
|
To install the necessary packages: |
|
pip install mysql-connector-python chardet colorama pyquery |
|
|
|
After completing this step, you can use the Python script located at /dataset/2_cleaning_txt.py |
|
to standardize the text for your dataset. |
|
|
|
Note: |
|
RSS feed links for media sources, as well as the HTML structure of media pages, change regularly.
Check the output for each media source periodically and adjust the parsing process, both to keep
the extracted text clean and to catch changes in RSS feed URLs.
|
|
|
Author : Guillaume Eckendoerffer |
|
Date : 29-09-23 |
|
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/ |
|
https://huggingface.co/datasets/eckendoerffer/news_fr |
|
""" |
|
|
|
import chardet |
|
import time, os, re, html, json, hashlib |
|
from colorama import Fore, init |
|
from pyquery import PyQuery as pq |
|
from config import DB_CONFIG |
|
from utils import create_connection, get_file_content, save_to_file, clean_text, decode_unicode_escapes, decode_content |
|
|
|
index_id = 1 |
|
stop_id = 1000000 |
|
path = os.getcwd() |
|
init(autoreset=True) |
|
|
|
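# Build a lookup string of every `key_title` hash already stored, so articles processed
# earlier (or duplicate titles) can be detected without extra queries.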
connection = create_connection(DB_CONFIG) |
|
cursor = connection.cursor() |
|
query = "SELECT `key_title` FROM `base_news` WHERE `key_title` != ''" |
|
cursor.execute(query) |
|
keys = cursor.fetchall() |
|
formatted_keys = "|".join([key[0] for key in keys]) + "|" |
|
|
|
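# Walk through the article ids one by one: load the locally saved HTML, extract and clean
# the text, then write the result to sources/txt_news/<id>.txt.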
while True: |
|
    id_source = ''
|
next_id = index_id + 1 |
|
time_start = time.time() |
|
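    # Fetch the url, media id and key_title of the current article.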
    with connection.cursor(dictionary=True) as cursor:
        cursor.execute(
            "SELECT `id`, `url`, `media`, `key_title` FROM `base_news` WHERE `id`=%s LIMIT 1",
            (index_id,)
        )
        row = cursor.fetchone()
    if not row:
        # No row for this id: move on, and stop once the configured id range is exhausted.
        if index_id > stop_id:
            break
        index_id = next_id
        continue
|
if row: |
|
id_source = row["id"] |
|
id_media = row["media"] |
|
key_title = row["key_title"] |
|
url = row["url"].strip() |
|
|
|
if key_title.strip(): |
|
index_id = next_id |
|
continue |
|
|
|
if id_source and id_source > stop_id: |
|
break |
|
|
|
|
|
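    # Read the locally stored HTML for this article.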
html_content = '' |
|
    content = ''
    title = ''
|
file_path = os.path.join(path, "sources", "html_news", f"{id_source}.txt") |
|
html_content = get_file_content(file_path) |
|
|
|
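    # Skip video replay pages and articles whose HTML file is missing or empty.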
if '/replay' in url or not html_content: |
|
index_id = next_id |
|
continue |
|
|
|
len_source = len(html_content) |
|
|
|
|
|
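    # Decode the page: try the shared decode_content() helper first, then fall back to the
    # chardet-detected encoding; give up on this article if both fail.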
if isinstance(html_content, str): |
|
html_content_bytes = html_content.encode('utf-8') |
|
else: |
|
html_content_bytes = html_content |
|
decoded_content = decode_content(html_content_bytes) |
|
if decoded_content is None: |
|
charset_result = chardet.detect(html_content_bytes) |
|
current_encoding = charset_result['encoding'] |
|
try: |
|
html_content = html_content_bytes.decode(current_encoding) |
|
except Exception as e: |
|
print(Fore.WHITE + f"Error: {e}") |
|
index_id = next_id |
|
continue |
|
else: |
|
html_content = decoded_content |
|
|
|
len_or = len(html_content) |
|
|
|
|
|
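    # Parse the HTML with PyQuery; unparsable pages are skipped.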
try: |
|
doc = pq(html_content) |
|
except Exception as e: |
|
print(Fore.WHITE + f"({id_source}) Error parsing HTML: {e} {url}") |
|
index_id = next_id |
|
continue |
|
|
|
|
|
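    # Use the first <h1> as the article title; skip the article if there is none.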
    if title.strip() == '':
        title = html.unescape(doc('h1:first').text())
    if title.strip() == '':
|
index_id = next_id |
|
continue |
|
|
|
extract_method = 0 |
|
|
|
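    # Extraction method 1: the "articleBody" field embedded in the page's JSON (schema.org) metadata.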
match = re.search(r'"articleBody"\s*:\s*"([^"]+)"', html_content) |
|
if match: |
|
content = html.unescape(match.group(1)) |
|
if content.strip(): |
|
extract_method = 1 |
|
try: |
|
content = json.loads(f'"{content}"') |
|
except json.JSONDecodeError: |
|
content = decode_unicode_escapes(content) |
|
|
|
|
|
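    # Extraction method 2: when no (or too little) JSON text was found, collect the <p> tags
    # of known article-body containers.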
    if not extract_method or len(content) < 100:
        selectors = [
            'article p', 'div.Body p', 'div.post-body p', 'div.article_content p',
            'div.article__text p', 'div.article-description p', 'div.mainBody p',
            'section.article-section p', 'div.article p',
        ]
        for selector in selectors:
            p_elements = doc(selector)
            if p_elements:
                break
|
|
|
for p in p_elements: |
|
html_element = pq(p) |
|
html_content += f" {html_element.html().strip()} " |
|
|
|
if ".futura-sciences.com" in url: |
|
html_element.find('a').remove() |
|
content += f" {html.unescape(html_element.text().strip())} " |
|
if content.strip(): extract_method = 2 |
|
|
|
len_text = len(content) |
|
|
|
|
|
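    # Normalise line breaks and spacing, then strip "reading time" / "see also" fragments.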
content = content.replace('\r', ' ').replace('\n', ' ') |
|
    # Make sure a space follows closing punctuation so sentences do not run together.
    for mark in ['.', '?', '!', ';', '»', ']']:
        content = content.replace(mark, mark + ' ')
|
    content = content.replace(html.unescape('&nbsp;'), ' ')  # normalise non-breaking spaces
|
content = re.sub(r'\s{2,}', ' ', content) |
|
content = re.sub(r'À lire aussi.{1,200}?»', ' ', content) |
|
content = re.sub(r'Temps de Lecture.{1,20}? Fiche', ' ', content) |
|
content = re.sub(r'Temps de Lecture.{1,20}? min.', ' ', content) |
|
|
|
|
|
if ".elle.fr" in url and "sur 5" in content: |
|
content = re.sub(r'Note :.{13,45}? sur 5', ' ', content) |
|
content = content.replace(content[content.find(" sur 5 "):], ' ') |
|
|
|
if ".latribune.fr" in url: |
|
content = content.replace("Partager :", ' ') |
|
|
|
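    # Everything that follows one of these phrases is footer or related-links boilerplate:
    # the text is truncated at the first occurrence.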
removePhrasesEnd = [ |
|
'Sur le même sujet', |
|
        'Sur le même thème',
|
'Nos articles à lire aussi', |
|
'Suivez toute l’actualité de vos villes', |
|
'En direct', |
|
"J'ai déjà un compte", |
|
'> Ecoutez', |
|
"Lire aussi >>", |
|
'Courrier international', |
|
'Vous avez trouvé une erreur?', |
|
'Il vous reste', |
|
'Partager', |
|
"Suivez-nous", |
|
'Newsletter', |
|
'Abonnez-vous', |
|
'1€ le premier mois', |
|
'Votre France Bleu', |
|
'Soyez le premier à commenter cet article', |
|
'Pour rester informé(e)', |
|
'Un site du groupe', |
|
"Cet article est réservé aux abonnés", |
|
"Recevez chaque vendredi l'essentiel", |
|
"Suivez toute l'actualité de ZDNet", |
|
"Suivez-nous sur les résaux sociaux", |
|
". par ", |
|
"Le résumé de la semaine", |
|
"ACTUELLEMENT EN KIOSQUE", |
|
" L’actualité par la rédaction de", |
|
"Gratis onbeperkt", |
|
"Débloquez immédiatement cet article", |
|
"À voir également", |
|
"null null null ", |
|
'Du lundi au vendredi, à 19h', |
|
"La rédaction de La Tribune", |
|
"Restez toujours informé: suivez-nous sur Google Actualités", |
|
"Du lundi au vendredi, votre rendez-vous", |
|
"Enregistrer mon nom, mon e-mail", |
|
"Mot de passe oublié", |
|
"accès à ce contenu", |
|
"En cliquant sur", |
|
'(function', |
|
] |
|
|
|
for phrase in removePhrasesEnd: |
|
if phrase in content: |
|
content = content.split(phrase, 1)[0] |
|
|
|
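    # These fragments are replaced with a period wherever they appear in the text.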
removePhrases = [ |
|
"Inscrivez-vous pour recevoir les newsletters de la Rép' dans votre boîte mail", |
|
"TF1 INFO", |
|
"Sujet TF1 Info", |
|
"Sujet JT LCI", |
|
"TF1 Info ", |
|
"JT 20h WE ", |
|
"JT 20h Semaine ", |
|
"Source :", |
|
"Inscrivez-vous aux newsletters de la RTBF Tous les sujets de l'article", |
|
"Pour voir ce contenu, connectez-vous gratuitement", |
|
">> LIRE AUSSI", |
|
"À LIRE AUSSI", |
|
"A lire aussi >> ", |
|
"» LIRE AUSSI -", |
|
" → À LIRE.", |
|
"À voir également", |
|
"Image d'illustration -", |
|
"Le média de la vie locale ", |
|
"Les plus lus.", |
|
"Ce live est à présent terminé.", |
|
" . -", |
|
"[…]", |
|
"[.]", |
|
"(…)", |
|
"(.)", |
|
"©", |
|
"Tous droits réservés", |
|
" sur TF1", |
|
"Avec AFP", |
|
" AFP /", |
|
"/ AFP ", |
|
". AFP", |
|
" BELGA /", |
|
"GETTY", |
|
"Getty Images", |
|
"→ EXPLICATION", |
|
"→ LES FAITS", |
|
"→", |
|
"À lire aussi", |
|
"EN RÉSUMÉ", |
|
"•", |
|
"►►► ", |
|
"► Écoutez l'entièreté de ce podcast ci-dessus", |
|
"► Pour écouter ce podcast , abonnez-vous", |
|
"►" |
|
] |
|
|
|
|
|
for phrase in removePhrases: |
|
content = content.replace(phrase, '.') |
|
|
|
|
|
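    # Final punctuation/spacing cleanup and removal of publication-date stamps.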
content = content.replace(',.', '.') |
|
content = content.replace('.,', '.') |
|
content = re.sub(r'\.{2,}', '.', content) |
|
content = re.sub(r'\s{2,}', ' ', content) |
|
content = re.sub(r'-{2,}', '-', content) |
|
    content = re.sub(r'_{2,}', '_', content)
|
content = re.sub(r'Publié le\s?:? \d{2} ?\/ ?\d{2} ?\/ ?\d{4}( à \d{2}h\d{2})?', '', content) |
|
content = re.sub(r'Mis à jour le \d{1,2} \w+ \. \d{4}', '', content) |
|
matches = [match.group() for match in re.finditer(r'(\d{2}:\d{2})?Modifié le : \d{2}/\d{2}/\d{4}( - \d{2}:\d{2})?', content) if len(match.group()) <= 38] |
|
for match in matches: |
|
content = content.replace(match, ' ') |
|
|
|
|
|
content = re.sub(r'<.*?>', '', content) |
|
add = f"{title}. " |
|
if len(content) > 160: |
|
add += f"{content}." |
|
|
|
add = clean_text(add) |
|
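    # Hash the title (MD5) to detect duplicates against the titles already stored in the database.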
key = hashlib.md5(title.encode()).hexdigest() |
|
nb_base = formatted_keys.count(f'{key}|') |
|
|
|
|
|
color = Fore.GREEN if len(content) > 200 else Fore.WHITE |
|
if len(content) > 200 and nb_base: |
|
color = Fore.CYAN |
|
|
|
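    # Keep the article only if it is long enough and its title has not been seen before.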
if len(content) > 200 and not nb_base: |
|
        cursor = connection.cursor()
        cursor.execute("UPDATE `base_news` SET `key_title`=%s WHERE `id`=%s LIMIT 1", (key, id_source))
        connection.commit()  # make sure the update is persisted even if autocommit is disabled
        formatted_keys = f'{formatted_keys}{key}|'
        save_to_file(os.path.join(path, "sources", "txt_news", f"{id_source}.txt"), add)
|
|
|
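    # Console log: article id, extraction method, timing, and length statistics.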
elapsed_time = time.time() - time_start |
|
print(color + f"{id_source:8}) ({extract_method:1}) [{elapsed_time:.3f}] [{len_source:7}{len_text:7}{len(content):7}{len(title):4} ] {url} ") |
|
index_id = next_id |
|
|