import json
import os
from pathlib import Path
import requests
from bs4 import BeautifulSoup
import urllib.parse


def get_top_news():
    """Scrape the tophub.today hot-news board and persist it to data.json.

    Returns:
        dict: mapping of string index -> [{"title": ..., "link": ...}] on
        success, or False when the HTTP request does not return 200.
    """
    url = "https://tophub.today/n/KqndgxeLl9"
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/91.0.4472.124 Safari/537.36"
    }
    # Timeout keeps the script from hanging indefinitely on a stalled connection.
    response = requests.get(url, headers=headers, timeout=30)
    if response.status_code == 200:
        soup = BeautifulSoup(response.text, "lxml")
        # First table on the page holds the ranking; 'td.al' cells carry the links.
        search_results = soup.find_all('table', class_='table')[0].find_all('td', class_='al')
        news_dict = {}
        for ind, search in enumerate(search_results):
            anchor = search.find('a')
            news_dict[f"{ind}"] = [{"title": anchor.text,
                                    "link": "https://tophub.today" + anchor['href']
                                    }]
        # encoding must be explicit: ensure_ascii=False writes raw CJK characters,
        # which fails with the default locale encoding on some platforms (Windows).
        with open('data.json', "w", encoding="utf-8") as f:
            json.dump(news_dict, fp=f, ensure_ascii=False)
        return news_dict
    else:
        print(f"请求失败，状态码: {response.status_code}")
        return False


import requests
from bs4 import BeautifulSoup
import urllib.parse
from utils import *


def google_search(query, exist_html=None):
    """Search Google (via the g.savalone.com mirror) and extract structured results.

    Args:
        query: search keywords; ignored when exist_html is provided.
        exist_html: optional path to a previously saved results page; when
            given, it is parsed offline instead of issuing a new HTTP request.

    Returns:
        dict of extracted results from extract_data, or None when the live
        HTTP request fails.
    """
    if exist_html:
        # Offline mode: re-parse a page saved by a previous live run.
        with open(exist_html, 'r', encoding='utf-8') as file:
            html_content = file.read()
        soup = BeautifulSoup(html_content, 'html.parser')
        return extract_data(soup)

    # 将搜索关键词进行URL编码 (URL-encode the query) and build the search URL.
    question = urllib.parse.quote_plus(query)
    url = f"https://g.savalone.com/search?q={question}"

    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    response = requests.get(url, headers=headers, timeout=30)

    if response.status_code != 200:
        # BUG FIX: the original fell through to `return json_search_output`
        # with that local never assigned, raising UnboundLocalError.
        # Return None explicitly to signal failure.
        print(f"请求失败，状态码: {response.status_code}")
        return None

    # Cache the raw page so it can be re-parsed later via exist_html.
    path = Path(f"{query}.html")
    path.write_text(response.text, encoding='utf-8')
    print(f"文件已保存: {path}")

    soup = BeautifulSoup(response.text, "lxml")
    return extract_data(soup)


def extract_data(soup):
    """Extract search results from a parsed Google results page.

    Args:
        soup: BeautifulSoup object of the full results HTML.

    Returns:
        dict mapping a string index ("0", "1", ...) to whatever
        tag_analysis (imported from utils) produces for that result block.

    NOTE(review): relies on Google's obfuscated class names ('dURPMd' etc.),
    which change over time — expect this selector to need maintenance.
    """
    # Narrow to the main results container: <div id="search"> then the
    # 'dURPMd' wrapper that holds the actual result list.
    container = soup.find('div', id='search')
    container = container.find('div', class_='dURPMd')
    # Direct children only (recursive=False): each one is a top-level
    # result section (ULSxyf / MjjYud / hlcw0c ...).
    search_results = container.find_all('div', recursive=False)
    json_file = {}
    for i, result in enumerate(search_results):
        # The first CSS class identifies the result variant; tag_analysis
        # dispatches on it to parse title/link/description accordingly.
        tag_attr = result['class'][0]
        json_file[f"{i}"] = tag_analysis(tag_attr, result)
    return json_file


# Example invocation
if __name__ == "__main__":
    # Alternative live flow (disabled): fetch hot titles with get_top_news()
    # and run google_search() on each title, collecting results per key.
    results = google_search(query=None, exist_html="降息.html")
    # Persist the extracted results; ensure_ascii=False keeps CJK text readable.
    Path('data.json').write_text(
        json.dumps(results, ensure_ascii=False), encoding="utf-8"
    )
