import requests
import pandas as pd
from bs4 import BeautifulSoup
import re
import html
import time
from tqdm import tqdm


def decode_weather_text(text):
    """Normalize a scraped weather text field.

    Decodes HTML entities, normalizes "high/low" temperature pairs
    (e.g. "-3°C/-7°C" -> "-3/-7"), maps the mis-decoded character '箸'
    to '级' (wind-force unit), strips known garbage characters, and
    collapses whitespace.

    Args:
        text: Raw cell text from the weather table. Non-string values
            are returned unchanged.

    Returns:
        The cleaned string, or the input unchanged if it is not a str.
    """
    if not isinstance(text, str):
        return text

    # Decode HTML entities (&amp;, &#176; etc.) first.
    text = html.unescape(text)

    # Normalize temperature pairs like "-3°C/-7°C" or "-3度/-7度" to "-3/-7".
    text = re.sub(r'(-?\d+)\s*[°度]?[Cc]?\s*/\s*(-?\d+)\s*[°度]?[Cc]?', r'\1/\2', text)

    # Map mojibake '箸' to the intended '级' BEFORE stripping junk characters.
    # (Bug fix: the original stripped '箸' first, making this replacement
    # dead code and losing the wind-force unit.)
    text = text.replace('箸', '级')

    # Remove replacement/garbage characters produced by bad decoding.
    text = re.sub(r'[�♦帧旋幅跳缝随排横低气]', '', text)

    # Collapse runs of whitespace to single spaces and trim.
    return ' '.join(text.split()).strip()


def scrape_monthly_weather(year, month):
    """Scrape one month of daily Dalian weather records (UTF-8 decoded).

    Args:
        year: Calendar year, e.g. 2023.
        month: Month number 1-12 (zero-padded into the URL).

    Returns:
        pandas.DataFrame with columns
        ['日期', '天气_白天', '天气_夜间', '最高温', '最低温', '风力_白天', '风力_夜间'];
        an empty DataFrame on any failure (network error, missing table, ...).
    """
    url = f"https://www.tianqihoubao.com/lishi/dalian/month/{year}{month:02d}.html"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Accept-Encoding': 'gzip'
    }

    try:
        # 1. Fetch the page. Fail fast on HTTP errors (404/500) instead of
        #    silently parsing an error page (bug fix: no status check before).
        response = requests.get(url, headers=headers, timeout=20)
        response.raise_for_status()
        response.encoding = 'utf-8'

        # 2. Parse HTML and locate the data table (any class containing "table").
        soup = BeautifulSoup(response.text, 'html.parser')
        table = soup.find('table', class_=lambda x: x and 'table' in x.lower())

        if not table:
            print(f"未找到表格: {year}-{month}")
            return pd.DataFrame()

        # 3. Extract one record per row: date / weather / temperature / wind.
        data = []
        for row in table.find_all('tr'):
            cols = [col.get_text(strip=True) for col in row.find_all(['td', 'th'])]
            if len(cols) < 4:
                continue  # header or malformed row

            date = decode_weather_text(cols[0])
            weather = decode_weather_text(cols[1])
            temp = decode_weather_text(cols[2])
            wind = decode_weather_text(cols[3])

            # Date: accept any "YYYY?MM?DD" layout, normalize to ISO format.
            date_match = re.search(r'(\d{4})\D*(\d{1,2})\D*(\d{1,2})', date)
            if not date_match:
                continue
            clean_date = f"{date_match.group(1)}-{date_match.group(2).zfill(2)}-{date_match.group(3).zfill(2)}"

            # Weather comes as a "day/night" pair.
            weather_parts = weather.split('/')
            weather_day = weather_parts[0].strip() if len(weather_parts) > 0 else ''
            weather_night = weather_parts[1].strip() if len(weather_parts) > 1 else ''

            # Temperature: first integer is the high, second the low.
            temp_parts = re.findall(r'-?\d+', temp)
            temp_high = temp_parts[0] if len(temp_parts) > 0 else None
            temp_low = temp_parts[1] if len(temp_parts) > 1 else None

            # Wind also comes as a "day/night" pair.
            wind_parts = wind.split('/')
            wind_day = wind_parts[0].strip() if len(wind_parts) > 0 else ''
            wind_night = wind_parts[1].strip() if len(wind_parts) > 1 else ''

            data.append([
                clean_date, weather_day, weather_night,
                temp_high, temp_low,
                wind_day, wind_night
            ])

        # 4. Build the DataFrame and coerce dtypes; unparseable dates become
        #    NaT via errors='coerce' and are dropped below.
        columns = ['日期', '天气_白天', '天气_夜间', '最高温', '最低温', '风力_白天', '风力_夜间']
        df = pd.DataFrame(data, columns=columns)

        df['日期'] = pd.to_datetime(df['日期'], errors='coerce')
        df[['最高温', '最低温']] = df[['最高温', '最低温']].apply(pd.to_numeric, errors='coerce')

        return df.dropna(subset=['日期'])

    except Exception as e:
        # Best-effort scraper: log and return empty so the other months
        # continue to be fetched by the caller.
        print(f"爬取 {year}-{month} 失败: {str(e)}")
        return pd.DataFrame()


def scrape_full_data(years=(2022, 2023, 2024)):
    """Scrape complete years of Dalian weather data and save one CSV.

    Args:
        years: Iterable of calendar years to scrape. Default changed from a
            mutable list to a tuple to avoid the shared-mutable-default
            pitfall; callers may still pass any iterable of ints.

    Side effects:
        Writes 'dalian_weather_2022_2024.csv' (UTF-8, no index) when at
        least one record was collected; prints progress and a summary.
    """
    all_data = pd.DataFrame()

    for year in years:
        print(f"\n开始爬取 {year} 年数据...")
        yearly_data = pd.DataFrame()

        for month in tqdm(range(1, 13), desc=f'{year}年'):
            monthly_data = scrape_monthly_weather(year, month)
            if not monthly_data.empty:
                yearly_data = pd.concat([yearly_data, monthly_data], ignore_index=True)
            time.sleep(1)  # polite delay between requests

        if not yearly_data.empty:
            all_data = pd.concat([all_data, yearly_data], ignore_index=True)

    if not all_data.empty:
        # Final cleanup: chronological order, fresh index, then persist.
        all_data = all_data.sort_values('日期').reset_index(drop=True)
        all_data.to_csv('dalian_weather_2022_2024.csv', index=False, encoding='utf-8')
        print("\n✅ 数据爬取完成！")
        print(f"共获取 {len(all_data)} 条记录")
        print("数据示例：")
        print(all_data.head())
    else:
        print("\n❌ 数据爬取失败")


def scrape_compare_data(year=2025):
    """Scrape January-June of a single year for comparison purposes.

    (Docstring fixed: the original said "multiple years" but this function
    scrapes months 1-6 of one year only.)

    Args:
        year: Calendar year to scrape (default 2025).

    Side effects:
        Writes 'dalian_weather_<year>.csv' (UTF-8, no index) when at least
        one record was collected; prints progress and a summary. The file
        name now follows the `year` argument instead of being hard-coded
        to 2025 (unchanged for the default).
    """
    all_data = pd.DataFrame()

    print(f"\n开始爬取 {year} 年1-6月数据...")

    for month in tqdm(range(1, 7), desc=f'{year}年'):
        monthly_data = scrape_monthly_weather(year, month)
        if not monthly_data.empty:
            all_data = pd.concat([all_data, monthly_data], ignore_index=True)
        time.sleep(1)  # polite delay between requests

    if not all_data.empty:
        # Final cleanup: chronological order, fresh index, then persist.
        all_data = all_data.sort_values('日期').reset_index(drop=True)
        all_data.to_csv(f'dalian_weather_{year}.csv', index=False, encoding='utf-8')
        print("\n✅ 数据爬取完成！")
        print(f"共获取 {len(all_data)} 条记录")
        print("数据示例：")
        print(all_data.head())
    else:
        print("\n❌ 数据爬取失败")


if __name__ == '__main__':
    # Entry point: first scrape the full 2022-2024 dataset, then the
    # 2025 January-June comparison dataset; each call writes its own CSV.
    scrape_full_data()
    scrape_compare_data()