import os
import random
import time
from io import StringIO

import pandas as pd
import requests

# Endpoint that returns one month of historical weather data as JSON
# whose "data" field holds an HTML <table> fragment.
url = "https://tianqi.2345.com/Pc/GetHistory"

# Browser-like headers; the Referer points at the history page for
# area 60247 (same area id used in the request params below).
headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/137.0.0.0 Safari/537.36",
    "Referer": "https://tianqi.2345.com/wea_history/60247.htm"
}


def craw_table(year, month, area_id=60247):
    """Fetch one month of historical weather data and parse it into a DataFrame.

    Args:
        year: Four-digit year to request.
        month: Month number (1-12).
        area_id: 2345.com area identifier; defaults to 60247 so existing
            callers keep the original behavior.

    Returns:
        A pandas DataFrame parsed from the HTML table in the response,
        or None when the request fails or no table is present.
    """
    params = {
        "areaInfo[areaId]": area_id,
        "areaInfo[areaType]": 2,
        "date[year]": year,
        "date[month]": month
    }
    try:
        response = requests.get(url, params=params, headers=headers, timeout=10)
        # Fail fast on HTTP 4xx/5xx instead of trying to parse an error page.
        response.raise_for_status()
        html_data = response.json()["data"]
        if "<table" not in html_data:
            print(f"[警告] {year}年{month}月 无有效表格")
            return None
        df = pd.read_html(StringIO(html_data))[0]
        return df
    except Exception as e:
        # Deliberate best-effort: log and skip this month so the crawl
        # loop can continue with the remaining months.
        print(f"[错误] {year}年{month}月 抓取失败：{e}")
        return None


df_list = []
# Cartesian product of the target years and months (adjust ranges as needed).
periods = [(y, m) for y in range(2022, 2023) for m in range(5, 6)]
for year, month in periods:
    print(f"正在爬取：{year}年{month}月...")
    monthly = craw_table(year, month)
    if monthly is not None:
        monthly["年份"] = year
        monthly["月份"] = month
        df_list.append(monthly)
    # Random pause between requests to reduce the risk of being blocked.
    pause = random.uniform(1.5, 3.5)  # wait 1.5-3.5 seconds
    print(f"等待 {pause:.2f} 秒以防风控...\n")
    time.sleep(pause)

# Merge the monthly frames and persist them to a single Excel workbook.
if df_list:
    all_data = pd.concat(df_list, ignore_index=True)
    # Fix: to_excel raises FileNotFoundError when the target directory
    # is missing, so create it up front (no-op if it already exists).
    os.makedirs("data", exist_ok=True)
    all_data.to_excel("data/北京密云天气_202205.xlsx", index=False)
    print("保存成功 ✅")
else:
    print("❌ 无有效数据，未生成文件")
