import requests
from bs4 import BeautifulSoup
import threading
import time

# Module-level lock used by write_to_txt to serialize file appends
# across threads, so concurrent writers don't interleave output lines.
lock = threading.Lock()

def get_html(url):
    """Fetch *url* and return its HTML as text.

    Returns an empty string on any request failure (connection error,
    timeout, or non-2xx status) after printing the error, so callers can
    simply truth-test the result.
    """
    try:
        # Present a common desktop-browser UA so the site serves the
        # normal page rather than blocking a bare requests client.
        headers = {
            "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
        }
        # Bug fix: without a timeout, requests.get can block forever on a
        # stalled server; Timeout is a RequestException, so the existing
        # handler covers it.
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()
        return response.text
    except requests.RequestException as e:
        print(f"获取 HTML 时出错: {e}")
        return ""

def parse_news(html):
    """Extract news entries from the page HTML.

    Returns a list of dicts, each with a 'title' and a 'link' key.
    NOTE(review): the `div.hidden` selector is site-specific — adjust it
    if the page structure changes.
    """
    soup = BeautifulSoup(html, 'html.parser')
    news_list = []
    news_items = soup.find_all('div', class_='hidden')
    for item in news_items:
        # Each container holds one or more <a> tags, one per headline.
        for link in item.find_all('a'):
            title = link.text.strip()
            # Skip decorative / empty anchors that carry no headline.
            if not title:
                continue
            news_list.append({
                'title': title,
                # Bug fix: the original comment promised the link but the
                # href was never captured; default to '' if absent.
                'link': link.get('href', ''),
            })
    return news_list

def write_to_txt(data):
    """Append each item's title to ./utils/temp.txt, one per line.

    Takes the module-level lock so concurrent threads cannot interleave
    their writes.
    """
    with lock:
        with open('./utils/temp.txt', 'a', encoding='utf-8') as f:
            for item in data:
                # Bug fix: the original f-string had a stray '+' before
                # the newline, appending a literal '+' to every line.
                f.write(f"标题: {item['title']}\n")

def main():
    """Fetch the NetEase domestic-news page, parse it, and persist the titles."""
    url = "https://news.163.com/domestic/"
    page = get_html(url)
    # An empty string means the fetch failed; nothing to parse or write.
    if not page:
        return
    write_to_txt(parse_news(page))

def run():
    """Run the scraper once and report its wall-clock duration."""
    # A single fetch of one page — no threading needed.
    started = time.time()
    main()
    elapsed = time.time() - started
    print(f"程序运行时间: {elapsed} 秒")
