#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@Author:张广勤
@Web site: https://www.tunan.wang
@Github:www.github.com
 
@File:news_detail1_0.py
@Time:2024/12/9 15:38

@Motto:不积跬步无以至千里，不积小流无以成江海！
"""
# 遍历文件的链接，将新闻详情文字部分爬取，保存到文件
import csv
import os

import requests
from bs4 import BeautifulSoup

# Input CSV produced by the listing scraper; expected columns: 日期/标题/链接.
file_path = '../news/outputnews_2024.csv'
# Destination CSV for the scraped article bodies.
output_file_path = 'newsdetail/news_detail.csv'

# Remembers the first link seen for each date so it can be skipped below.
first_link_of_day = {}

# Accumulates [date, title, link, text] rows for the output CSV.
news_details = []

# Walk the listing file and fetch the article body for every link
# except the first one of each day.
with open(file_path, mode='r', encoding='utf-8') as file:
    reader = csv.DictReader(file)

    for row in reader:
        date, title, link = row['日期'], row['标题'], row['链接']
        print(date)

        # Deliberately skip the first link of each day.
        if date not in first_link_of_day:
            first_link_of_day[date] = link
            continue

        # Not the day's first link: try to fetch and parse the article.
        try:
            # A timeout keeps one unresponsive server from hanging the
            # entire run (the except below also catches requests.Timeout).
            response = requests.get(link, timeout=10)
            response.raise_for_status()  # raise on HTTP error status

            # requests falls back to ISO-8859-1 when the HTTP header carries
            # no charset, which garbles Chinese pages once forced into
            # BeautifulSoup via from_encoding — prefer the encoding sniffed
            # from the response body in that case.
            content_encoding = response.encoding
            if not content_encoding or content_encoding.lower() == 'iso-8859-1':
                content_encoding = response.apparent_encoding or 'utf-8'

            # Parse the page with the resolved encoding.
            soup = BeautifulSoup(response.content, 'html.parser', from_encoding=content_encoding)
            content_area = soup.find('div', id='content_area')

            # Only keep pages that actually contain the article container.
            if content_area:
                content_text = content_area.get_text(separator=' ', strip=True)
                news_details.append([date, title, link, content_text])
        except requests.RequestException as e:
            print(f"请求链接 {link} 时出错: {e}")

# Write the collected rows to the output CSV.
# Create the destination directory first: a fresh checkout will not have
# it, and open(..., 'w') cannot create intermediate directories.
os.makedirs(os.path.dirname(output_file_path) or '.', exist_ok=True)

# newline='' prevents the csv module from emitting blank lines on Windows.
with open(output_file_path, mode='w', encoding='utf-8', newline='') as output_file:
    writer = csv.writer(output_file)
    writer.writerow(['日期', '标题', '链接', '内容'])  # header row
    writer.writerows(news_details)  # write all collected rows in one pass



