import re
import time as delay
from bs4 import BeautifulSoup
import pandas as pd
import json
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import sys
import os
# 将项目根目录添加到 sys.path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# 现在可以正常导入
from userUtils.query import save_to_mysql

from selenium import webdriver
from selenium.webdriver.edge.service import Service  # Edge 专用 Service
from selenium.webdriver.edge.options import Options  # Edge 专用 Options

# Path to the local EdgeDriver binary (replace with yours, or use the
# auto-manager below).
# Bug fix: this was r"F:\\desktop\\..." — inside a raw string the
# backslashes are NOT escapes, so the path contained literal doubled
# separators. Windows happens to collapse them, but the single-backslash
# raw-string form is the correct spelling.
EDGEDRIVER_PATH = r"F:\desktop\bigdata\weather-master\msedgedriver.exe"
# Alternatively, let webdriver-manager download a matching driver (recommended):
# from webdriver_manager.microsoft import EdgeChromiumDriverManager

def init_driver():
    """Create and return a headless Microsoft Edge WebDriver.

    Returns:
        selenium.webdriver.Edge: a ready-to-use driver instance; the
        caller is responsible for calling ``driver.quit()``.
    """
    # Option 1: point Selenium at a manually installed msedgedriver.exe.
    edge_service = Service(executable_path=EDGEDRIVER_PATH)
    # Option 2 (recommended): auto-download a matching driver
    # (requires the webdriver-manager package):
    # edge_service = Service(EdgeChromiumDriverManager().install())

    # Run without a visible window; GPU/sandbox flags are needed on
    # some systems (and in Linux/Docker environments).
    edge_options = Options()
    for flag in ("--headless", "--disable-gpu", "--no-sandbox"):
        edge_options.add_argument(flag)

    return webdriver.Edge(service=edge_service, options=edge_options)







def get_page_with_selenium(driver, url):
    """Load *url* and expand all dynamically loaded content.

    Repeatedly clicks the site's "show more" element (CSS class
    ``lishidesc2``) until it can no longer be found or clicked, then
    returns the fully rendered page source.

    Args:
        driver: a live Selenium WebDriver instance.
        url (str): the page to load.

    Returns:
        str: ``driver.page_source`` after all rows have been expanded.
    """
    driver.get(url)

    # Click "show more" until it stops working. The selector may need
    # adjusting if the site's markup changes.
    while True:
        try:
            # Wait up to 10 s for the "show more" element to appear.
            more_button = WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, "lishidesc2"))
            )

            # Bring the button into view before clicking.
            # Bug fix: these statements previously ended with stray
            # trailing commas, turning each into a throwaway tuple.
            driver.execute_script("arguments[0].scrollIntoView();", more_button)
            delay.sleep(1)

            more_button.click()
            delay.sleep(2)  # give the newly loaded rows time to render
        except Exception:
            # Bug fix: was a bare ``except:``, which also swallowed
            # KeyboardInterrupt/SystemExit. Any Selenium failure
            # (timeout, stale element, click intercepted) means there
            # is nothing more to expand.
            break

    return driver.page_source
def parse_page(html):
    """Parse one month of weather-history HTML into a DataFrame.

    Extracts the date, max/min temperature, weather description and
    wind from the ``thrui`` table markup.

    Args:
        html (str): full page source as returned by Selenium.

    Returns:
        pandas.DataFrame: columns 日期 / 最高温度 / 最低温度 / 天气 / 风向,
        one row per day.
    """
    soup = BeautifulSoup(html, 'html.parser')
    # Limit the regex search to the table that holds the daily rows.
    table_html = str(soup.find_all(class_='thrui'))

    # Each row: one th200 cell (date + weekday) followed by four th140
    # cells (max temp, min temp, weather, wind direction).
    date_pat = re.compile('class="th200">(.*?)</div>')
    cell_pat = re.compile('class="th140">(.*?)</div>')

    # Keep only the YYYY-MM-DD prefix; drop the weekday suffix.
    dates = [d[:10] for d in date_pat.findall(table_html)]

    cells = cell_pat.findall(table_html)
    # Group the flat cell list into rows of four (truncating any
    # incomplete trailing group, as the original loop did).
    rows = [cells[i:i + 4] for i in range(0, len(cells) - len(cells) % 4, 4)]

    return pd.DataFrame({
        '日期': dates,
        '最高温度': [r[0] for r in rows],
        '最低温度': [r[1] for r in rows],
        '天气': [r[2] for r in rows],
        '风向': [r[3] for r in rows],
    })


def crawl_weather(city, code, time, driver):
    """Fetch, parse and store one month of weather history for a city.

    Builds the lishi.tianqi.com URL from the city *code* and *time*
    (``YYYYMM``), renders it with Selenium, parses the daily rows and
    saves them to MySQL. Failures are reported and swallowed so a
    single bad month does not abort the whole crawl.

    Args:
        city (str): human-readable city name (used for logging/storage).
        code: site-specific city code used in the URL.
        time (str): month to crawl, formatted ``YYYYMM``.
        driver: a live Selenium WebDriver instance.
    """
    url = f"http://lishi.tianqi.com/{code}/{time}.html"
    print(f"正在爬取: {city} - {time}")

    try:
        page_html = get_page_with_selenium(driver, url)
        month_data = parse_page(page_html)
        save_to_mysql(month_data, city)
    except Exception as exc:
        print(f"爬取 {city} 的 {time} 历史天气数据失败：{exc}")
    else:
        print(f"成功爬取 {city} 的 {time} 的历史天气数据")

    # Polite crawling: pause between requests to avoid being blocked.
    delay.sleep(5)


# 主程序
# Entry point: crawl every month in the range for every city in city.json.
if __name__ == "__main__":
    driver = init_driver()

    try:
        # City-name -> site-code mapping.
        # NOTE: path was r'F:\\desktop\\...' (raw string, so the
        # backslashes were literally doubled); corrected to the
        # single-backslash raw-string form.
        with open(r'F:\desktop\bigdata\weather-master\spider\city.json',
                  'r', encoding='utf-8') as f:
            city_dict = json.load(f)

        # Months to crawl as "YYYYMM" strings. Generated instead of the
        # previous hard-coded 126-entry list (error-prone to maintain):
        # full years 2015-2024, then January-June 2025.
        time_list = [
            f"{year}{month:02d}"
            for year in range(2015, 2025)
            for month in range(1, 13)
        ]
        time_list += [f"2025{month:02d}" for month in range(1, 7)]

        # Crawl every (city, month) combination.
        for city, code in city_dict.items():
            for time in time_list:
                crawl_weather(city, code, time, driver)
    finally:
        # Always release the browser, even if the crawl fails midway.
        driver.quit()