# pip install beautifulsoup4 lxml
import json
import os.path
from datetime import date, datetime, timedelta

import requests
from bs4 import BeautifulSoup
from sympy.physics.units import current

from config import base_dir
from http_client_tools import HttpClient


def get_news_current_data():
    """Fetch the Yahoo News front page and return its headline texts.

    Downloads https://news.yahoo.com/ via HttpClient (which saves the
    page to a local file), parses the saved HTML, and extracts the text
    of every <h3> inside the "module-ntk" top-headlines section.

    Returns:
        list[str]: unique headline strings in reverse extraction order
        (each new headline is prepended, so the first one found on the
        page ends up last in the list).
    """
    url = 'https://news.yahoo.com/'

    http_client = HttpClient()
    # download_url_main saves the page to disk and returns the file path.
    file_name = http_client.download_url_main(url)
    print(file_name)

    with open(file_name, 'r', encoding='utf-8') as file:
        html_content = file.read()

    soup = BeautifulSoup(html_content, 'lxml')

    # The "need to know" module that holds the top headlines.
    main_content = soup.find('div', id='module-ntk',
                             class_="wafer-rapid-module module-ntk")
    if main_content is None:
        # Page layout changed or the download returned an error page:
        # return nothing instead of crashing on None.find_all(...).
        return []

    headlines = [item.text for item in main_content.find_all('h3')]

    # De-duplicate while prepending (insert at index 0), which reverses
    # the on-page order — callers rely on this prepend-merge behaviour.
    new_data = []
    for headline in headlines:
        if headline not in new_data:
            new_data.insert(0, headline)

    return new_data


def get_current_date():
    """Return the current date as a 'YYYY-MM-DD' string, shifted back 9 hours.

    The 9-hour offset appears intended as a timezone adjustment —
    TODO(review): confirm which zone the author targeted.

    Bug fixed: the original subtracted ``timedelta(days=9/24)`` from a
    ``date``.  That is a silent no-op, because ``date`` arithmetic only
    uses the whole-days part of a ``timedelta`` (9/24 days normalizes to
    days=0, seconds=32400, which is dropped).  Using ``datetime`` makes
    the 9-hour shift actually take effect.

    Returns:
        str: the (shifted) date formatted as "%Y-%m-%d".
    """
    shifted = datetime.now() - timedelta(hours=9)
    return shifted.strftime("%Y-%m-%d")


def _merge_headlines(existing, new_items):
    """Prepend each not-yet-seen item of *new_items* to *existing* (in place).

    Returns *existing* for convenience.  Prepending keeps the newest
    headlines at the front of the saved JSON list.
    """
    for item in new_items:
        if item not in existing:
            existing.insert(0, item)
    return existing


data = get_news_current_data()

# One sub-directory per day, e.g. <base_dir>/2024-01-31
date_dir = base_dir + os.sep + get_current_date()
if not os.path.exists(date_dir):
    os.mkdir(date_dir)
save_file_dir = date_dir + os.sep + "news.yahoo.com.json"

if os.path.exists(save_file_dir):
    # Merge today's headlines into the already-saved ones.
    with open(save_file_dir, 'r', encoding='utf-8') as json_file:
        loaded_data = json.load(json_file)
    loaded_data = _merge_headlines(loaded_data, data)
else:
    # First run of the day: start from an empty list.  (The original
    # also piped the result through list(set(...)), which randomized
    # the stored order on every run; the merge above already
    # guarantees uniqueness, so that step is dropped.)
    loaded_data = _merge_headlines([], data)

# Serialize the merged headline list back to the per-day JSON file.
with open(save_file_dir, 'w', encoding='utf-8') as json_file:
    json.dump(loaded_data, json_file, ensure_ascii=False, indent=4)
print("数据已写入 {} 文件".format(save_file_dir))

# # 提取所有的<li>标签的内容
# li_items = [li.text for li in soup.find_all('li')]
# print("List items:")
# for item in li_items:
#     print(f"- {item}")
