import requests
from bs4 import BeautifulSoup
import pandas as pd
import time
import random
from urllib.parse import urljoin
import re
from datetime import datetime, timedelta
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service
import os

# Manually maintained pool of desktop browser User-Agent strings.
# One is chosen at random per request/driver to vary the client fingerprint
# and reduce the chance of being blocked by the target site.
USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:89.0) Gecko/20100101 Firefox/89.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
]

def get_headers():
    """Return a fresh HTTP header dict for scraping lishi.tianqi.com.

    A User-Agent is drawn at random from USER_AGENTS on every call so that
    consecutive requests do not all present the same browser signature.
    """
    static_headers = {
        'Referer': 'https://lishi.tianqi.com/',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
        'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
    }
    return {'User-Agent': random.choice(USER_AGENTS), **static_headers}

def get_city_links():
    """Return the city index pages to scrape (currently Quzhou city only)."""
    quzhou_index = 'https://lishi.tianqi.com/quzhou/index.html'
    return [quzhou_index]

def get_weather_data(city_url):
    """Fetch the full daily weather table from one history page via Selenium.

    Loads `city_url` in headless Chrome, clicks the "show more" control so
    the whole month is rendered, then parses the `div.tian_three li` rows.

    Args:
        city_url: URL of a lishi.tianqi.com monthly history page.

    Returns:
        A list of dicts with keys 日期/最高气温/最低气温/天气/风向,
        or an empty list on any failure (errors are printed, not raised).
    """
    driver = None
    try:
        # Headless Chrome with a randomized User-Agent.
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        chrome_options.add_argument('--no-sandbox')
        chrome_options.add_argument('--disable-dev-shm-usage')
        chrome_options.add_argument(f'user-agent={random.choice(USER_AGENTS)}')

        # ChromeDriver path: overridable via the CHROMEDRIVER_PATH env var so the
        # script is portable; the original hard-coded Windows path is the default.
        chromedriver_path = os.environ.get(
            'CHROMEDRIVER_PATH',
            r"C:\Program Files (x86)\Google\Chrome\Application\chromedriver-win64\chromedriver.exe",
        )
        service = Service(executable_path=chromedriver_path)

        driver = webdriver.Chrome(service=service, options=chrome_options)
        driver.get(city_url)

        # Wait until the weather table container is present in the DOM.
        WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.CLASS_NAME, "tian_three"))
        )

        # Click the "show more" button (if present) so all days are loaded.
        # Best-effort: some pages render everything without the button.
        try:
            more_button = driver.find_element(By.CLASS_NAME, "lishidesc2")
            if more_button.is_displayed():
                # JS click avoids "element not interactable" overlay issues.
                driver.execute_script("arguments[0].click();", more_button)
                time.sleep(2)  # give the extra rows time to render
        except Exception as e:
            print(f"点击'查看更多'按钮时出错: {e}")

        # Parse the fully rendered page.
        soup = BeautifulSoup(driver.page_source, 'html.parser')
        daily_data = []

        all_lis = soup.select('div.tian_three li')
        if not all_lis:
            print(f"未找到任何天气数据: {city_url}")
            return []

        # Each <li> holds one day: date, high, low, weather, wind.
        for li in all_lis:
            divs = li.find_all('div')
            if len(divs) >= 5:
                date_str = divs[0].get_text(strip=True)
                max_temp = divs[1].get_text(strip=True)
                min_temp = divs[2].get_text(strip=True)
                weather = divs[3].get_text(strip=True)
                wind = divs[4].get_text(strip=True)

                # Skip header/footer rows: keep only rows starting with a date.
                if re.match(r'\d{4}-\d{2}-\d{2}', date_str):
                    daily_data.append({
                        '日期': date_str,
                        '最高气温': max_temp,
                        '最低气温': min_temp,
                        '天气': weather,
                        '风向': wind
                    })

        return daily_data
    except Exception as e:
        print(f"获取天气数据失败({city_url}): {e}")
        return []
    finally:
        if driver:
            try:
                driver.quit()
            # Fix: was a bare `except:` which also swallowed KeyboardInterrupt
            # and SystemExit; narrow to Exception.
            except Exception:
                pass

def save_to_csv(data, filename):
    """Save scraped daily-weather records to a CSV file.

    Args:
        data: list of per-day dicts (keys become CSV columns).
        filename: destination path for the CSV file.

    Does nothing (after printing a notice) when `data` is empty.
    Uses utf_8_sig encoding so Excel opens Chinese headers correctly.
    """
    if not data:
        print("没有数据可保存")
        return

    df = pd.DataFrame(data)
    df.to_csv(filename, index=False, encoding='utf_8_sig')
    # Fix: message previously printed the literal text "(unknown)" instead of
    # the actual destination path.
    print(f"数据已保存到 {filename}")

def get_all_month_urls(city_url, recent_years=1):
    """Generate monthly history-page URLs covering the last `recent_years` years.

    URL pattern: '<city>/index.html' -> '<city>1/YYYYMM.html', one URL per
    calendar month from the month containing (now - 365*recent_years days)
    up to and including the current month.

    Args:
        city_url: the city's index page URL.
        recent_years: how many years of history to cover (default 1).

    Returns:
        A sorted list of unique month URLs.
    """
    base_url_prefix = city_url.replace('/index.html', '1/')
    current_date = datetime.now()
    # Fix: `recent_years` was previously ignored — the window was hard-coded
    # to 365 days regardless of the argument.
    start_date = current_date - timedelta(days=365 * recent_years)

    # Walk month by month from the first day of the start month.
    cursor = datetime(start_date.year, start_date.month, 1)
    month_urls = set()
    while cursor <= current_date:
        month_urls.add(f"{base_url_prefix}{cursor.year}{cursor.month:02d}.html")
        if cursor.month == 12:
            cursor = datetime(cursor.year + 1, 1, 1)
        else:
            cursor = datetime(cursor.year, cursor.month + 1, 1)

    return sorted(month_urls)

def main():
    """Entry point: scrape every configured city's monthly pages and save one CSV."""
    print("正在获取城市链接...")
    city_links = get_city_links()
    print(f"共找到 {len(city_links)} 个城市链接")

    collected = []
    for city_url in city_links:
        print(f"正在获取所有可用月份url: {city_url}")
        month_urls = get_all_month_urls(city_url, recent_years=1)
        print(f"共找到 {len(month_urls)} 个可用月份")
        for month_url in month_urls:
            print(f"正在处理: {month_url}")
            rows = get_weather_data(month_url)
            if rows:
                collected.extend(rows)
            # Polite random delay between page fetches.
            time.sleep(random.uniform(2, 4))

    if not collected:
        print("没有获取到任何数据")
    else:
        save_to_csv(collected, 'historical_weather_daily.csv')

# Run the scraper only when executed directly as a script, not on import.
if __name__ == '__main__':
    main()