#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@Author:张广勤
@Web site: https://www.tunan.wang
@Github:www.github.com
 
@File:news_detail1_1.py
@Time:2024/12/9 17:16

@Motto:不积跬步无以至千里，不积小流无以成江海！
"""

import csv
from bs4 import BeautifulSoup
import requests
from concurrent.futures import ThreadPoolExecutor, as_completed
import time

# Input CSV holding the scraped news index (columns: 日期/date, 标题/title, 链接/link).
file_path = '../news/outputnews_2024.csv'
# Output CSV that will receive the fetched article bodies.
output_file_path = 'newsdetail/news_detail1_1.csv'

# Maps date -> the first link encountered for that date; entries recorded
# here are deliberately excluded from fetching (see the reader loop below).
first_link_of_day = {}

# Accumulates [date, title, link, content] rows destined for the output CSV.
news_details = []

# 定义一个函数来获取网页内容
# 定义一个函数来获取网页内容
def fetch_news_content(link):
    """Fetch one news page and extract its article body text.

    Args:
        link: URL of the news article page.

    Returns:
        A ``(link, content_text)`` tuple. ``content_text`` is the
        space-joined, stripped text of the ``<div id="content_area">``
        element, or an empty string when the request fails or the
        element is absent.
    """
    try:
        response = requests.get(link, timeout=10)  # cap per-request wait at 10s
        response.raise_for_status()
        # NOTE(review): requests normally guesses an encoding from the headers,
        # so this fallback rarely fires; response.apparent_encoding may be more
        # accurate for CJK pages — confirm against the target site.
        content_encoding = response.encoding if response.encoding else 'utf-8'
        soup = BeautifulSoup(response.content, 'html.parser', from_encoding=content_encoding)
        content_area = soup.find('div', id='content_area')
        # Fix: dropped the original .encode('utf-8').decode('utf-8') round-trip,
        # which is a no-op on a Python 3 str.
        content_text = content_area.get_text(separator=' ', strip=True) if content_area else ''
        return link, content_text
    except requests.RequestException as e:
        print(f"请求链接 {link} 时出错: {e}")
        return link, ''

# Read the index CSV and build the work list of links to fetch.
# The first link seen for each date is only recorded in first_link_of_day
# and is intentionally left out of links_to_fetch.
links_to_fetch = []
with open(file_path, mode='r', encoding='utf-8') as src:
    for row in csv.DictReader(src):
        date, title, link = row['日期'], row['标题'], row['链接']
        if date in first_link_of_day:
            links_to_fetch.append((date, title, link))
        else:
            first_link_of_day[date] = link

# Time the concurrent fetch phase.
start_time = time.time()

# Fan the downloads out across a small thread pool.
with ThreadPoolExecutor(max_workers=10) as pool:
    pending = {
        pool.submit(fetch_news_content, link): (date, title, link)
        for date, title, link in links_to_fetch
    }
    for done in as_completed(pending):
        date, title, link = pending[done]
        print(date)
        try:
            _, content_text = done.result()
        except Exception as e:
            print(f"处理链接 {link} 时出错: {e}")
        else:
            news_details.append([date, title, link, content_text])

end_time = time.time()

# Report how long the fetch phase took.
execution_time = end_time - start_time
print(f"执行时间: {execution_time} 秒")

# Persist the collected rows as a UTF-8 CSV with a header line.
with open(output_file_path, mode='w', encoding='utf-8', newline='') as out:
    csv_out = csv.writer(out)
    csv_out.writerow(['日期', '标题', '链接', '内容'])
    csv_out.writerows(news_details)