import requests
from bs4 import BeautifulSoup
import time
import csv

BASE_URL = "http://www.air-level.com"
HEADERS = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'}
REQUEST_TIMEOUT = 10
DELAY = 1  # seconds between requests, to avoid triggering anti-scraping defenses


def get_all_cities():
    """Fetch (city name, relative URL) pairs from the site's index page.

    Returns:
        list[tuple[str, str]]: one ``(name, href)`` tuple per link found
        inside the ``#citylist`` container, or an empty list on any failure
        (network error, bad HTTP status, parse error).
    """
    try:
        response = requests.get(BASE_URL, headers=HEADERS, timeout=REQUEST_TIMEOUT)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, "html.parser")

        # The first <h4> carries the "last updated" timestamp. Guard against
        # it being absent: previously a missing <h4> raised AttributeError
        # and aborted the whole scrape even though the city links below
        # were still parseable.
        update_header = soup.find('h4')
        if update_header is not None:
            update_time = update_header.get_text(strip=True)
            print(f"数据更新时间：{update_time}")

        # CSS selector on the container id is more robust than positional
        # tag traversal against minor layout changes.
        city_links = soup.select('#citylist a')
        cities = [(link.get_text(strip=True), link['href']) for link in city_links]

    except Exception as e:
        print(f"获取城市列表失败：{str(e)}")
        return []

    print(f"共发现 {len(cities)} 个城市")
    return cities


def get_city_aqi(city_name, city_url):
    """Fetch the AQI reading for a single city page.

    Args:
        city_name: Display name, echoed back in the returned tuple.
        city_url: Path relative to ``BASE_URL`` (e.g. ``/beijing``).

    Returns:
        tuple[str, str]: ``(city_name, aqi_text)`` on success, or
        ``(city_name, <Chinese error marker>)`` on failure.
    """
    target = BASE_URL + city_url

    try:
        # Throttle before every request to stay under anti-scraping limits.
        time.sleep(DELAY)
        response = requests.get(target, headers=HEADERS, timeout=REQUEST_TIMEOUT)
        response.raise_for_status()

        # Original site layout: the AQI value sits in the page's first <span>.
        page = BeautifulSoup(response.text, "html.parser")
        span = page.find("span")

        # Guard clauses instead of if/else: bail out on a missing or
        # non-string span, otherwise return the trimmed value.
        if span is None or span.string is None:
            return (city_name, "数据解析失败")
        return (city_name, span.string.strip())

    except requests.exceptions.HTTPError as e:
        return (city_name, f"HTTP错误 {e.response.status_code}")
    except Exception as e:
        print(f"获取 {city_name} 数据时发生异常：{str(e)}")
        return (city_name, "获取失败")


def save_to_csv(data, filename="aqi_data.csv"):
    """Write (city, AQI) rows to a CSV file with a Chinese header row.

    Args:
        data: Iterable of ``(city_name, aqi_value)`` pairs.
        filename: Output path; the file is overwritten if it exists.
    """
    with open(filename, 'w', newline='', encoding='utf-8') as f:
        writer = csv.writer(f)
        writer.writerow(["城市名称", "AQI值"])
        writer.writerows(data)
    # Bug fix: the message previously printed the literal text "(unknown)"
    # instead of the actual output filename.
    print(f"数据已保存到 {filename}")


def main():
    """Scrape the AQI of every listed city and persist the results to CSV."""
    city_list = get_all_cities()
    if not city_list:
        print("无法获取城市列表，程序终止")
        return

    total = len(city_list)
    results = []
    # Fetch sequentially with progress output; get_city_aqi handles its own
    # per-request delay and error reporting.
    for position, (name, href) in enumerate(city_list, 1):
        record = get_city_aqi(name, href)
        results.append(record)
        print(f"{position}/{total} {record[0]}: {record[1]}")

    save_to_csv(results)


# Run the scraper only when executed as a script, not when imported.
if __name__ == "__main__":
    main()