# -*- coding: utf-8 -*-

"""
Random Line Fetcher for Large Datasets

Extracts and stores relevant links from local French online news articles.

pip install beautifulsoup4 mysql-connector-python colorama

Author     : Guillaume Eckendoerffer
Date       : 28-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
             https://huggingface.co/datasets/eckendoerffer/news_fr
"""

import hashlib
import os
from urllib.parse import urlparse

from bs4 import BeautifulSoup
from colorama import Fore, init
import mysql.connector

# Database configuration
db_config = {
    "host": "[host]",
    "user": "[user]",
    "password": "[passwd]",
    "database": "[database]"
}
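
# A minimal sketch (an assumption, not part of the original script) of how the
# placeholder credentials above could be read from environment variables
# instead of being hard-coded; the DB_* variable names are illustrative:
#
#   db_config = {
#       "host": os.environ.get("DB_HOST", "localhost"),
#       "user": os.environ.get("DB_USER", "root"),
#       "password": os.environ.get("DB_PASSWORD", ""),
#       "database": os.environ.get("DB_NAME", "news_fr"),
#   }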

conn = mysql.connector.connect(**db_config)
cursor = conn.cursor()
cursor.execute("SELECT `key_media` FROM `base_news` WHERE `key_media` != ''")
# Known URL hashes, kept in a set for O(1) duplicate checks.
seen_keys = {row[0] for row in cursor.fetchall()}
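
# Each URL is keyed by the MD5 hex digest of the full URL, e.g.:
#   hashlib.md5("https://example.com/page".encode()).hexdigest()  # 32 hex chars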

init(autoreset=True)

def get_dom_path(url):
    """Return the scheme and host part of a URL, e.g. https://example.com."""
    parsed_url = urlparse(url)
    return f"{parsed_url.scheme}://{parsed_url.netloc}"

def get_html_content(file_path):
    """Read a saved HTML file, ignoring undecodable bytes."""
    with open(file_path, 'r', encoding='utf8', errors='ignore') as file:
        return file.read()

def mysqli_return_number(conn, query, params=None):
    """Run a scalar SELECT and return the first column of the first row."""
    cursor = conn.cursor()
    cursor.execute(query, params or ())
    result = cursor.fetchone()
    cursor.close()
    return result[0] if result else 0
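
# Example usage with a parameterized query (the exact query is an assumption):
#   nb = mysqli_return_number(conn, "SELECT COUNT(*) FROM `base_news` WHERE `media`=%s", (1,))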

def process_news_source(id_source, url_source, id_media):
    dom = get_dom_path(url_source)
    # Mark the source as processed first so it is not picked up again.
    cursor.execute("UPDATE `base_news` SET `link`='1' WHERE `id`=%s LIMIT 1", (id_source,))
    conn.commit()

    file_path = f"sources/html_news/{id_source}.txt"
    if os.path.exists(file_path):
        html_content = get_html_content(file_path)
    else:
        return  

    print(f"{id_source} {url_source} {id_media} ({len(html_content)})")

    soup = BeautifulSoup(html_content, 'html.parser')
    nb_add = 0
    for link in soup.find_all('a'):
        url = link.get('href')
        if url is None:
            continue
        # Drop fragments and query strings.
        url = url.split("#")[0].split("?")[0]
        if not url:
            continue
        if "//" not in url:
            # Relative link: resolve against the source domain.
            url = f"{dom}/{url}" if url[0] != '/' else f"{dom}{url}"
        elif "http" not in url:
            # Protocol-relative link (e.g. //example.com/page).
            url = 'https:' + url
        if not url.startswith(("http://", "https://")) or any(c in url for c in (' ', '%', "'")):
            continue

        key = hashlib.md5(url.encode()).hexdigest()

        if not url.startswith(dom):
            # External link: ignore.
            # print(Fore.RED + url)
            continue
        if key in seen_keys:
            # URL already stored.
            # print(Fore.YELLOW + url)
            continue
        if (
            url.count("-") > 6 and
            not any(substring in url for substring in ['replay', 'video', 'login', '/inloggen', '?redirect', '.jpg', '.png', 'mailto'])
        ):
            print(Fore.GREEN + url)
            insert_query = (
                "INSERT INTO `base_news` (`id`, `key_media`, `media`, `url`, `step`) "
                "VALUES (NULL, %s, %s, %s, '0')"
            )
            cursor.execute(insert_query, (key, id_media, url))
            conn.commit()
            seen_keys.add(key)
            nb_add += 1

    if nb_add:
        print(f"  -> {nb_add} new link(s) added")

def process():
    """Fetch a random batch of unprocessed sources; return False when none remain."""
    cursor = conn.cursor()
    cursor.execute(
        "SELECT `id`, `url`, `media` FROM `base_news` "
        "WHERE `link`='0' AND `step` > 0 ORDER BY RAND() LIMIT 1000"
    )
    rows = cursor.fetchall()

    if not rows:
        print('No unprocessed news source found.')
        return False

    for id_source, url_source, id_media in rows:
        process_news_source(id_source, url_source, id_media)
    return True

if __name__ == "__main__":
    # Process random batches until no unprocessed source remains.
    while process():
        pass
    conn.close()