Rename extract_news/4_extract_url_news.py to extract_news/4_extract_news_url.py
5c28b03
# -*- coding: utf-8 -*-
"""
News links extractor

Extracts and stores relevant links from local French online news articles.

pip install beautifulsoup4 mysql-connector-python colorama

Author     : Guillaume Eckendoerffer
Date       : 28-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
"""
import os
import hashlib

from bs4 import BeautifulSoup
from colorama import Fore, init

from config import DB_CONFIG
from utils import create_connection, get_file_content

connection = create_connection(DB_CONFIG)
cursor = connection.cursor()

# Load the md5 key of every URL already stored in `base_news`, joined as
# "key|key|...|" so membership can be checked with a simple substring count.
query = "SELECT `key_media` FROM `base_news` WHERE `key_media` != ''"
cursor.execute(query)
keys = cursor.fetchall()
formatted_keys = "|".join([key[0] for key in keys]) + "|"

init(autoreset=True)
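
# Assumed, minimal shape of the `base_news` table, inferred from the queries
# in this script (an illustration, not the project's actual schema):
#
#   CREATE TABLE base_news (
#       id        INT AUTO_INCREMENT PRIMARY KEY,
#       key_media CHAR(32),          -- md5 of the article URL
#       media     INT,               -- id of the news source
#       url       TEXT,
#       step      INT DEFAULT 0,     -- processing stage
#       link      TINYINT DEFAULT 0  -- set to 1 once links have been extracted
#   );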

def get_dom_path(url):
    from urllib.parse import urlparse
    parsed_url = urlparse(url)
    return f"{parsed_url.scheme}://{parsed_url.netloc}"

def process_news_source(id_source, url_source, id_media):
    global formatted_keys

    dom = get_dom_path(url_source)

    # Mark this source as processed so it is not picked up again.
    cursor.execute(f"UPDATE `base_news` SET `link`='1' WHERE `id`='{id_source}' LIMIT 1")
    connection.commit()

    # The HTML of each source article is expected on disk as <id>.txt.
    file_path = f"sources/html_news/{id_source}.txt"
    if os.path.exists(file_path):
        html_content = get_file_content(file_path)
    else:
        return

    print(f"{id_source} {url_source} {id_media} ({len(html_content)})")

    soup = BeautifulSoup(html_content, 'html.parser')
    nb_add = 0
    for link in soup.find_all('a'):
        url = link.get('href')
        if url is None:
            continue

        # Drop fragments and query strings.
        url = url.split("#")[0]
        url = url.split("?")[0]
        if not url:
            continue

        # Resolve relative links against the source domain and add a scheme
        # to protocol-relative ("//host/...") links.
        if "//" not in url:
            url = f"{dom}/{url}" if url[0] != '/' else f"{dom}{url}"
        elif "http" not in url:
            url = 'https:' + url

        # Skip anything that is still not a clean absolute http(s) URL.
        if not url.startswith(("http://", "https://")) or url.count(' ') or url.count('%') or url.count('\''):
            continue

        key = hashlib.md5(url.encode()).hexdigest()
        nb_base_news = formatted_keys.count(f'{key}|')

        if url.startswith(dom):
            if nb_base_news:
                # Already indexed.
                continue
            elif (
                # Heuristic: slug-style article URLs contain many hyphens;
                # media pages, login pages and binary assets are excluded.
                url.count("-") > 6 and
                not any(substring in url for substring in ['replay', 'video', 'login', '/inloggen', '?redirect', '.jpg', '.png', 'mailto'])
            ):
                print(Fore.GREEN + url)
                insert_query = f"INSERT INTO `base_news` (`id`, `key_media`, `media`, `url`, `step`) VALUES (NULL, '{key}', '{id_media}', '{url}', '0');"
                cursor.execute(insert_query)
                connection.commit()
                formatted_keys = f'{formatted_keys}{key}|'
                nb_add += 1
            else:
                continue
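
# A parameterized variant of the INSERT above, as a sketch only: the original
# script builds the statement with an f-string and relies on the filters above
# to reject quotes, spaces and '%'. mysql-connector-python also accepts %s
# placeholders, e.g.:
#
#   insert_query = ("INSERT INTO `base_news` (`id`, `key_media`, `media`, `url`, `step`) "
#                   "VALUES (NULL, %s, %s, %s, '0')")
#   cursor.execute(insert_query, (key, id_media, url))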

def process():
    global formatted_keys

    # Pick a random batch of sources whose HTML has been fetched (step > 0)
    # but whose links have not been extracted yet (link = 0).
    cursor = connection.cursor()
    query = ("SELECT `id`, `url`, `media` FROM `base_news` WHERE `link`='0' AND `step` > 0 ORDER BY Rand() LIMIT 1000")
    cursor.execute(query)
    rows = cursor.fetchall()

    if not rows:
        print('No unprocessed news source found.')

    for row in rows:
        id_source, url_source, id_media = row
        process_news_source(id_source, url_source, id_media)

while True:
    process()