import calendar
from io import StringIO

import pandas as pd
import requests

def fetch_weather_data(urls, timeout=10):
    """Fetch weather tables from each URL and save them to Excel files.

    For every URL the first HTML table on the page is parsed into a
    DataFrame, written to an Excel file named after the year/month/day/hour
    path segments, and collected. Failures on individual URLs are printed
    and skipped, so one bad page does not abort the whole crawl.

    Args:
        urls: Iterable of archive-page URLs shaped like
            ".../{year}/{month}/{day}/{hour}/00".
        timeout: Per-request timeout in seconds; without it a single hung
            server would stall the entire loop indefinitely.

    Returns:
        One DataFrame with all fetched tables concatenated
        (ignore_index=True), or None when nothing could be fetched.
    """
    results = []  # all successfully parsed tables
    for url in urls:
        try:
            response = requests.get(url, timeout=timeout)
            response.raise_for_status()  # non-2xx status -> HTTPError
            # Wrap the HTML in StringIO: passing a literal string to
            # read_html is deprecated since pandas 2.1.
            dfs = pd.read_html(StringIO(response.text))
            if dfs:
                # Assume the first table on the page is the one we want.
                df = dfs[0]
                # Name the file after the year/month/day/hour path
                # segments; the trailing "/00" segment is dropped.
                excel_name = "weather_data_" + "".join(url.split("/")[-5:-1])
                with pd.ExcelWriter(excel_name + ".xlsx") as writer:
                    df.to_excel(writer, index=False, sheet_name=excel_name)
                print(f"数据已保存到Excel文件中（{url}）。")
                results.append(df)
            else:
                print(f"未能解析表格数据（{url}）。")
        except requests.HTTPError as http_err:
            print(f'HTTP error occurred: {http_err}（{url}）')
        except Exception as err:
            print(f'An error occurred: {err}（{url}）')
    # Merge everything into a single frame; None signals "nothing fetched".
    if results:
        return pd.concat(results, ignore_index=True)
    print("没有可用的数据.")
    return None

# --- Script driver: crawl hourly archive pages for April-May 2024 ---

# URL template for one hourly archive page.
url_template = "https://www.wx-now.com/Archival/ZGSZ/{year}/{month}/{day}/{hour}/00"

# Build the URL list using the real number of days in each month:
# the original range(1, 30) silently skipped April 30 and May 30-31.
urls = [
    url_template.format(year=2024, month=month, day=day, hour=hour)
    for month in range(4, 6)  # April and May
    for day in range(1, calendar.monthrange(2024, month)[1] + 1)
    for hour in range(24)     # 00:00 through 23:00
]

combined_df = fetch_weather_data(urls)
