import csv
import urllib.request
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
import time


def fetch_weather(url):
    """Fetch one city's 7-day forecast page and parse it into rows.

    Args:
        url: A tianqi.com 7-day forecast URL (e.g. ".../beijing/7/").

    Returns:
        A list of up to 7 rows, each ``[location, date, weather, temp_low,
        temp_high]`` (all strings). Returns ``[]`` on any request failure or
        when the expected page structure is missing.
    """
    # Put the User-Agent on the Request itself instead of calling
    # urllib.request.install_opener(): installing an opener mutates global
    # state and this function runs concurrently in a ThreadPoolExecutor.
    headers = {
        "User-Agent":
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36",
    }

    try:
        request = urllib.request.Request(url, headers=headers)
        # Context manager guarantees the HTTP connection is closed.
        with urllib.request.urlopen(request) as response:
            html = response.read().decode('utf-8')
    except Exception as e:
        print(f"请求数据时发生错误: {e} URL: {url}")
        return []

    final = []
    bs = BeautifulSoup(html, "html.parser")

    # <h1> holds the location name; guard against a changed page layout
    # (bs.find returns None when the tag is absent).
    h1 = bs.find('h1')
    if h1 is None:
        print(f"没有找到页面标题 h1，网页结构可能已更改 URL: {url}")
        return []
    location_name = h1.text.strip()

    ul = bs.find('ul', class_='weaul')
    if ul is None:
        print(f"没有找到天气预报的 ul 部分，网页结构可能已更改 URL: {url}")
        return []

    # Only the first 7 <li> entries are the 7-day forecast.
    for li in ul.find_all('li')[:7]:
        date = li.find('span', class_='fl').text.strip()
        weather = li.find('div', class_='weaul_z').text.strip()

        # Second 'weaul_z' div holds the low/high temperature spans; guard
        # the index so a changed layout degrades to 'N/A' instead of crashing.
        temp_divs = li.find_all('div', class_='weaul_z')
        temp_values = temp_divs[1].find_all('span') if len(temp_divs) > 1 else []
        if len(temp_values) == 2:
            temp_low = temp_values[0].text.strip()
            temp_high = temp_values[1].text.strip()
        else:
            temp_low = temp_high = 'N/A'

        final.append([location_name, date, weather, temp_low, temp_high])

    return final


def update_weather_data():
    """Scrape every configured city in parallel and write the rows to weather.txt."""
    # Cities to scrape (7-day forecast pages).
    urls = [
        "https://www.tianqi.com/baoding/7/",
        "http://www.tianqi.com/beijing/7/",
        "http://www.tianqi.com/zunhua/7/",
        "http://www.tianqi.com/shenzhen/7/"
    ]

    # Fan the requests out across worker threads; materialize the results
    # while the pool is still alive.
    with ThreadPoolExecutor(max_workers=4) as pool:
        per_site = list(pool.map(fetch_weather, urls))

    # Flatten the per-site row lists into one table.
    all_rows = [row for site_rows in per_site for row in site_rows]

    # Dump everything as CSV (header row first).
    with open('weather.txt', 'w', newline='', encoding='utf-8') as out:
        writer = csv.writer(out)
        writer.writerow(['地区', '日期', '天气状况', '最低温度', '最高温度'])
        writer.writerows(all_rows)

    print("天气数据已成功写入 weather.txt")


def main():
    """Entry point: refresh the weather file immediately, then twice a day forever."""
    interval = 43200  # seconds between refreshes (12 hours)
    while True:
        update_weather_data()
        time.sleep(interval)


if __name__ == '__main__':
    main()