#!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@Author:张广勤
@Web site: https://www.tunan.wang
@Github:www.github.com
 
@File:news_detail_md1_2.py
@Time:2024/12/24 8:43

@Motto:不积跬步无以至千里，不积小流无以成江海！
"""
# !/usr/bin/env python
# -*- coding:utf-8 -*-

"""
@Author: 张广勤
@Web site: https://www.tunan.wang 
@Github: www.github.com

@File: news_detail_md1_0.py
@Time: 2024/12/9 22:09

@Motto: 不积跬步无以至千里，不积小流无以成江海！
"""
import csv
import requests
from bs4 import BeautifulSoup
import os
import chardet
from cctv_news.getlastdate1_0 import get_latest_date


def save_md(date_news, content_news):
    """Write one day's news items to ``news_md/<date_news>.md``.

    The file starts with a ``# <date>`` heading, followed by one
    ``## <title>`` section (title + body) per news item.

    Args:
        date_news: Date string used both as the heading and the filename stem.
        content_news: Iterable of ``(title, body)`` pairs for that day.
    """
    lines = [f"# {date_news}\n"]
    for title, body in content_news:
        lines.append(f"## {title}\n")
        lines.append(f"{body}\n\n")
    # Batch the writes: identical bytes on disk, single writelines call.
    with open(f"news_md/{date_news}.md", 'w', encoding='utf-8') as out:
        out.writelines(lines)

def detail_xinzen():
    """Scrape article bodies for news rows newer than the last saved date.

    Reads ``../news/outputnews_2024.csv`` (columns: date, title, link),
    fetches each article page for dates strictly after the latest date
    already present in ``news_md/`` (per ``get_latest_date``), and writes
    one Markdown file per day via ``save_md``.

    Side effects: creates the ``news_md/`` directory, performs HTTP GETs,
    writes Markdown files, and prints progress/error messages.
    """
    # Ensure the MD output directory exists (exist_ok avoids the
    # check-then-create race of the original exists()/makedirs() pair).
    os.makedirs('news_md', exist_ok=True)

    # Latest date already converted to Markdown; dates must be in a
    # lexicographically sortable format (YYYYMMDD) for the > comparisons.
    start_date = get_latest_date("news_md")

    with open('../news/outputnews_2024.csv', mode='r', encoding='utf-8') as file:
        reader = csv.reader(file)
        next(reader)  # skip the header row
        daily_content = []
        current_date = None

        for row in reader:
            # Assumed CSV column order: date, title, link
            date, title, link = row[0].strip(), row[1].strip(), row[2].strip()

            if date != current_date:  # a new day starts
                # Flush the previous day's items before starting the new day.
                if daily_content and current_date > start_date:
                    save_md(current_date, daily_content)
                daily_content = []
                current_date = date

            if date > start_date:  # only fetch rows newer than what we have
                try:
                    # timeout prevents one hung server from stalling the run.
                    response = requests.get(link, timeout=10)
                    response.raise_for_status()
                    # Detect the charset on the RAW bytes and let requests
                    # decode once.  The original detected on the Tag's UTF-8
                    # re-encoding (always utf-8, so detection was meaningless)
                    # and then did text.encode('latin1').decode(...), which
                    # raises UnicodeEncodeError on any Chinese text.
                    detected = chardet.detect(response.content)
                    response.encoding = detected['encoding'] or response.apparent_encoding
                    soup = BeautifulSoup(response.text, 'html.parser')
                    content_area = soup.find('div', id='content_area')
                    if content_area:
                        daily_content.append((title, content_area.text.strip()))
                    else:
                        # Previously this message lived in an unreachable
                        # except AttributeError; report the miss here instead.
                        print(f"未找到content_area: {link}")
                except requests.RequestException as e:
                    print(f"请求错误：{e}")

        # Flush the final day (loop ends without another date change).
        if daily_content and current_date > start_date:
            save_md(current_date, daily_content)

if __name__ == "__main__":
    detail_xinzen()
