import re
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import jieba
import jieba.analyse
import pandas as pd

# Module-level accumulators shared by hot_pull():
# - dataa: one {'url': ..., 'title': ...} dict per scraped hot-search title
# - all_keywords: (keyword, title) tuples extracted from those titles via jieba
dataa = []
all_keywords = []

def _session_cookies():
    """Return the hard-coded Weibo session cookies used to authenticate.

    NOTE(review): these values are a captured session and will expire;
    they should eventually come from config rather than source.
    """
    return [
        {'name': 'SUB',
         'value': '_2AkMR3qLjf8NxqwFRmfwTyW7lZI51wgHEieKnglM4JRMxHRl-yT8XqkAjtRB6Ol6MDA4npezyQRRiodc8Zv8rhw7Wcm_o',
         'domain': '.weibo.com'},
        {'name': 'SUBP', 'value': '0033WrSXqPxfM72-Ws9jqgMF55529P9D9WFYrWiBoBCS8N4X.nJZsApl',
         'domain': '.weibo.com'},
        {'name': '_s_tentry', 'value': 'weibo.com', 'domain': '.weibo.com'},
        {'name': 'Apache', 'value': '1610693552481.1394.1719807453667', 'domain': '.weibo.com'},
        {'name': 'SINAGLOBAL', 'value': '1610693552481.1394.1719807453667', 'domain': '.weibo.com'},
        {'name': 'ULV', 'value': '1719807453782:1:1:1:1610693552481.1394.1719807453667:', 'domain': '.weibo.com'},
        {'name': 'WBtopGlobal_register_version', 'value': '2024070115', 'domain': '.weibo.com'}
    ]


def hot_pull():
    """Scrape Weibo hot-search titles, save them to an Excel file, and
    return (keyword, title) pairs extracted from each title with jieba.

    Side effects:
        - Writes '../public/data/tables/title.xlsx' with columns url/title/id.
        - Replaces the contents of the module-level `dataa` and
          `all_keywords` lists with this run's results.

    Returns:
        list[tuple[str, str]]: (keyword, title) pairs; keywords that are
        purely digits or purely ASCII letters are filtered out.
    """
    # Weibo login/search landing page, used both to set cookies and to scrape.
    url = 'https://weibo.com/newlogin?tabtype=search&gid=&openLoginLayer=0&url='

    # Accumulate into locals, then sync the module-level lists at the end.
    # The original appended straight to the globals, so a second call would
    # duplicate every row in the Excel output and return stale results.
    rows = []
    keywords_out = []

    # Reject keywords that are purely digits or purely ASCII letters.
    # Compiled once (was two per-keyword re.match calls inside the loop);
    # fullmatch replaces the anchored ^...$ patterns.
    junk_kw = re.compile(r'[0-9]+|[a-zA-Z]+')

    # Configure Chrome to run headless and to hide the automation flag.
    chrome_options = Options()
    chrome_options.add_argument("--disable-blink-features=AutomationControlled")
    chrome_options.add_argument("--headless")
    chrome_options.add_argument("--disable-gpu")
    chrome_options.add_argument("--no-sandbox")
    chrome_options.add_argument("--disable-dev-shm-usage")

    driver = None
    try:
        # ChromeDriverManager downloads a matching chromedriver and returns its path.
        service = Service(ChromeDriverManager().install())
        driver = webdriver.Chrome(service=service, options=chrome_options)
        driver.set_page_load_timeout(30)
        driver.set_script_timeout(30)

        # The domain must be loaded once before add_cookie() is allowed.
        driver.get(url)
        for cookie in _session_cookies():
            driver.add_cookie(cookie)
        # Reload the page, now authenticated via the injected cookies.
        driver.get(url)
        driver.implicitly_wait(10)

        soup = BeautifulSoup(driver.page_source, 'html.parser')
        # Each hot-topic entry lives in a div with this (obfuscated) class;
        # brittle against Weibo front-end updates — confirm if scraping breaks.
        containers = soup.find_all(
            'div', class_='woo-box-flex woo-box-alignCenter HotTopic_titout_1CFlj')

        print("爬取到热榜标题：")
        for container in containers:
            title_tag = container.find('a', class_='ALink_default_2ibt1 HotTopic_tit_eS4fv')
            if not title_tag:
                print("No title found")
                continue
            title = title_tag.get_text()
            print(title)
            rows.append({'url': url, 'title': title})

            # Top-5 keywords per title, minus pure-number / pure-letter tokens.
            for kw in jieba.analyse.extract_tags(title, topK=5):
                if not junk_kw.fullmatch(kw):
                    keywords_out.append((kw, title))

        # Persist the titles with a 1-based autoincrement id column.
        df = pd.DataFrame(rows)
        df['id'] = range(1, len(df) + 1)
        df.to_excel('../public/data/tables/title.xlsx', index=False)
        print("热榜标题数据已存入：title.xlsx")
    except Exception as e:
        # Best-effort scraper: report and fall through so the driver is closed.
        print(f"An error occurred: {e}")
    finally:
        if driver:
            driver.quit()

    # Replace (not extend) the globals so repeated runs don't accumulate,
    # while external readers of dataa/all_keywords keep working.
    dataa[:] = rows
    all_keywords[:] = keywords_out

    print("关键词数：", len(all_keywords))
    return all_keywords

# Script entry point: run the scraper when executed directly.
if __name__ == '__main__':
    hot_pull()  # scrape, write title.xlsx, and print keyword count
