eckendoerffer committed on
Commit
951e868
1 Parent(s): e852b44

Upload 2 files

Browse files
extract_news/2_extract_news.py ADDED
@@ -0,0 +1,99 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ """
4
+ News Source Extractor:
5
+
6
+ This script is designed to extract the content of news articles from various French media sources.
7
+ The URLs of these articles are retrieved from the `base_news` table, where articles marked with
8
+ a `step` value of '0' are pending extraction.
9
+
10
+ To install the necessary packages:
11
+ pip install aiohttp mysql-connector-python
12
+
13
+ Once extracted, the content of each article is saved locally for further processing. This separation
14
+ of content fetching and processing is intentional to optimize resource management.
15
+
16
+ The script operates in batches, processing a defined number of entries (`NB_BY_STEP`) at a time.
17
+ After extraction, the `step` value of the processed articles is updated to '1' to indicate completion.
18
+
19
+ Author : Guillaume Eckendoerffer
20
+ Date : 29-09-23
21
+ Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
22
+ https://huggingface.co/datasets/eckendoerffer/news_fr
23
+ """
24
+
25
+ import asyncio
26
+ import aiohttp
27
+ import time
28
+ import mysql.connector
29
+ import os
30
+
31
+ # Database configuration
32
+ db_config = {
33
+ "host": "[host]",
34
+ "user": "[user]",
35
+ "password": "[passwd]",
36
+ "database": "[database]"
37
+ }
38
+
39
+ NB_BY_STEP = 20
40
+ path = os.getcwd()
41
+
42
def mysqli_return_number(conn, query):
    """Execute *query* and return the first column of the first row, or 0.

    Args:
        conn: an open DB-API connection (mysql.connector in this script).
        query: SQL expected to yield a single scalar (e.g. a COUNT).

    Returns:
        The scalar value, or 0 when the query produces no row.
    """
    cursor = conn.cursor()
    try:
        cursor.execute(query)
        row = cursor.fetchone()
        return row[0] if row else 0
    finally:
        # Release the cursor even when execute() raises; the original
        # leaked it on any SQL error.
        cursor.close()
48
+
49
async def fetch_and_save(url, id_source):
    """Download *url* and cache the page as sources/html_news/<id_source>.txt.

    The body is decoded as UTF-8, falling back to ISO-8859-1 (common on older
    French news sites), and is always written back out as UTF-8.

    Args:
        url: article URL to fetch.
        id_source: primary key in `base_news`, used as the cache file name.
    """
    try:
        async with aiohttp.ClientSession() as session:
            async with session.get(url) as response:
                byte_content = await response.read()

        try:
            text_content = byte_content.decode('utf-8')
        except UnicodeDecodeError:
            # Legacy-encoding fallback for sites not serving UTF-8.
            text_content = byte_content.decode('ISO-8859-1')

        with open(f"{path}/sources/html_news/{id_source}.txt", "w", encoding="utf-8") as file:
            file.write(text_content)

    except aiohttp.TooManyRedirects:
        print(f"Too many redirects for URL: {url}")
    except (aiohttp.ClientError, asyncio.TimeoutError, OSError) as err:
        # One bad URL must not abort the whole asyncio.gather() batch: the
        # row is already marked step='1' by the caller, so log and move on
        # (previously any error other than TooManyRedirects killed the batch).
        print(f"Fetch failed for URL: {url} ({err})")
65
+
66
+
67
async def main():
    """Batch driver: claim pending rows, download them concurrently, repeat.

    Each iteration claims NB_BY_STEP random rows with step='0', marks them
    step='1', and fetches them in parallel with fetch_and_save().  Loops
    until no pending row remains.
    """
    conn = mysql.connector.connect(**db_config)

    while True:
        time_start = time.time()

        cursor = conn.cursor()
        cursor.execute(
            "SELECT `id`, `url` FROM `base_news` WHERE `step`='0' "
            f"ORDER BY RAND() LIMIT {NB_BY_STEP}"
        )
        rows = cursor.fetchall()
        cursor.close()

        if not rows:
            break

        tasks = []
        cursor = conn.cursor()
        for id_source, url in rows:
            # Claim the row before fetching; parameterized rather than
            # interpolated into the SQL string.
            cursor.execute(
                "UPDATE `base_news` SET `step`='1' WHERE `id`=%s LIMIT 1",
                (id_source,),
            )
            print(f'{id_source}) {url}')
            tasks.append(fetch_and_save(url.strip(), id_source))
        cursor.close()
        # mysql-connector does NOT autocommit: without this commit the
        # step='1' updates were silently rolled back and the same rows
        # could be selected again on every pass.
        conn.commit()

        await asyncio.gather(*tasks)

        nb_base = mysqli_return_number(conn, "SELECT COUNT(`id`) FROM `base_news` WHERE `step`='0'")
        time_elapsed = time.time() - time_start
        time_per_item = time_elapsed / NB_BY_STEP
        print(f"Remaining: {nb_base} - Time: {time_per_item:.3f}s/item")

    conn.close()
98
+
99
# Guard the entry point so importing this module does not start the crawl.
if __name__ == "__main__":
    asyncio.run(main())
extract_news/4_extract_news_url.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- coding: utf-8 -*-
2
+
3
+ """
4
+ Random Line Fetcher for Large Datasets
5
+
6
+ Extracts and stores relevant links from local French online news articles.
7
+
8
+ pip install beautifulsoup4 mysql-connector-python colorama
9
+
10
+ Author : Guillaume Eckendoerffer
11
+ Date : 28-09-23
12
+ Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
13
+ """
14
+
15
+ import os
16
+ from bs4 import BeautifulSoup
17
+ import mysql.connector
18
+ import hashlib
19
+ from colorama import Fore, init
20
+
21
+ # Database configuration
22
+ db_config = {
23
+ "host": "[host]",
24
+ "user": "[user]",
25
+ "password": "[passwd]",
26
+ "database": "[database]"
27
+ }
28
+
29
# Shared database connection used by every function in this script.
conn = mysql.connector.connect(**db_config)
cursor = conn.cursor()
# Preload the MD5 key of every known article so duplicates can be detected
# in memory (substring test) instead of one SELECT per discovered link.
query = "SELECT `key_media` FROM `base_news` WHERE `key_media` != ''"
cursor.execute(query)
keys = cursor.fetchall()
# Flat "key1|key2|...|" string; membership is tested with .count(f'{key}|').
formatted_keys = "|".join([key[0] for key in keys]) + "|"

# colorama: reset terminal color automatically after each print.
init(autoreset=True)
37
+
38
def get_dom_path(url):
    """Return the scheme and host of *url*, e.g. 'https://example.com'."""
    from urllib.parse import urlparse
    parts = urlparse(url)
    return "{}://{}".format(parts.scheme, parts.netloc)
42
+
43
def get_html_content(file_path):
    """Read a cached HTML file as UTF-8 text, silently skipping bad bytes."""
    with open(file_path, encoding='utf8', errors='ignore') as handle:
        return handle.read()
46
+
47
def mysqli_return_number(conn, query, params=None):
    """Execute *query* and return the first column of the first row, or 0.

    Args:
        conn: an open DB-API connection.
        query: SQL expected to yield a single scalar.
        params: optional sequence of bind parameters.  Previously this
            argument was accepted but silently ignored; it is now passed
            through to execute() so callers can use placeholders.

    Returns:
        The scalar value, or 0 when the query produces no row.
    """
    cursor = conn.cursor()
    try:
        if params is None:
            cursor.execute(query)
        else:
            cursor.execute(query, params)
        row = cursor.fetchone()
        return row[0] if row else 0
    finally:
        # Release the cursor even on SQL errors.
        cursor.close()
53
+
54
def mysqli_return_count(conn, query):
    """Execute a scalar COUNT-style *query* and return its value, or 0.

    NOTE(review): this duplicates mysqli_return_number(); both names are
    kept because existing call sites use each of them.
    """
    cur = conn.cursor()
    cur.execute(query)
    row = cur.fetchone()
    cur.close()
    return row[0] if row else 0
60
+
61
def process_news_source():
    """Pick one unprocessed article, extract its outbound links, queue new ones.

    Selects a random row from `base_news` with link='0' and step>0 (skipping
    a few blacklisted URL prefixes), marks it link='1', parses its cached
    HTML file, and inserts every plausible new article URL from the same
    domain with step='0' so the fetcher script can download it later.

    Returns:
        A sentinel message string when no pending row is left; otherwise
        None (including when the cached HTML file is missing).
    """
    global formatted_keys

    cursor = conn.cursor()
    query = ("SELECT `id`, `url`, `media` FROM `base_news` WHERE `link`='0' AND `step` > 0 AND `id` > 215000 "
             "AND `url` NOT LIKE 'https://avis-vin.%' AND `url` NOT LIKE 'https://www.elle.fr/%' "
             "AND `url` NOT LIKE 'www.lamontagne.fr/%' AND `url` NOT LIKE 'https://www.rtbf.be/%' "
             "AND `url` NOT LIKE 'https://www.tf1info.fr/%' AND `url` NOT LIKE 'https://www.futura-sciences.com/%' "
             "AND `url` NOT LIKE 'https://cdn-elle.ladmedia.fr/%' ORDER BY Rand() LIMIT 1")
    cursor.execute(query)
    row = cursor.fetchone()

    if not row:
        return 'No unprocessed news source found.'

    id_source, url_source, id_media = row
    dom = get_dom_path(url_source)
    # Parameterized: never interpolate values into SQL strings.
    cursor.execute("UPDATE `base_news` SET `link`='1' WHERE `id`=%s LIMIT 1", (id_source,))
    conn.commit()

    nb_link = mysqli_return_count(conn, "SELECT COUNT(`id`) FROM `base_news` WHERE `step`='0'")

    file_path = f"sources/html_news/{id_source}.txt"
    if not os.path.exists(file_path):
        # Cached page never downloaded; leave the row marked link='1'.
        return
    html_content = get_html_content(file_path)

    print(f"{nb_link} {url_source} {id_media} ({len(html_content)})")

    soup = BeautifulSoup(html_content, 'html.parser')
    nb_add = 0
    for link in soup.find_all('a'):
        url = link.get('href')
        if url is None:
            continue
        # Drop fragment and query string before normalizing.
        url = url.split("#")[0]
        url = url.split("?")[0]

        if not url:
            continue
        if "//" not in url:
            # Relative link: resolve against the source page's domain.
            url = f"{dom}/{url}" if url[0] != '/' else f"{dom}{url}"
        elif "http" not in url:
            # Protocol-relative link ('//host/path').
            url = 'https:' + url
        if not url.startswith(("http://", "https://")) or url.count(' ') or url.count('%') or url.count('\''):
            continue

        key = hashlib.md5(url.encode()).hexdigest()
        nb_base_news = formatted_keys.count(f'{key}|')

        if url.startswith(dom):
            if nb_base_news:
                # Already known URL.
                continue
            elif (
                url.count("-") > 6 and
                not any(substring in url for substring in ['replay', 'video', 'login', '/inloggen', '?redirect', '.jpg', '.png', 'mailto'])
            ):
                print(Fore.GREEN + url)
                # Parameterized INSERT: the URL and key come from scraped,
                # untrusted HTML — the previous f-string build was an SQL
                # injection vector.
                cursor.execute(
                    "INSERT INTO `base_news` (`id`, `key_media`, `media`, `url`, `step`) "
                    "VALUES (NULL, %s, %s, %s, '0')",
                    (key, id_media, url),
                )
                conn.commit()
                formatted_keys = f'{formatted_keys}{key}|'
                nb_add += 1
        else:
            # Off-domain link: ignore.
            continue
126
+
127
# Process sources until none are pending.  Breaking on the sentinel return
# value makes conn.close() reachable — previously `while True:` never
# terminated, so the script spun forever once the table was exhausted and
# the close below was dead code.
while True:
    if process_news_source():
        break

conn.close()
131
+