import requests
from bs4 import BeautifulSoup
import re
import datetime

def get_webpage_text(url, output_file='nmc_forecast.txt'):
    """
    Fetch a web page, strip it down to readable text, and save it as a txt file.

    Parameters:
    url (str): target page URL
    output_file (str): output file path

    Returns:
    bool: True when the file was written, False on any failure.
    """
    # Browser-like User-Agent so the server does not reject the request.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }

    try:
        print("正在获取网页内容...")
        response = requests.get(url, headers=headers, timeout=10)
        response.encoding = 'utf-8'  # force utf-8 decoding of the body

        # Guard clause: bail out early on any non-200 status.
        if response.status_code != 200:
            print(f"请求失败，状态码: {response.status_code}")
            return False

        print("网页获取成功，正在解析内容...")
        soup = BeautifulSoup(response.text, 'html.parser')

        # Drop non-content tags (scripts, styles, page chrome) before
        # extracting the visible text.
        for tag in soup(["script", "style", "nav", "footer", "header", "aside"]):
            tag.decompose()

        raw_text = soup.get_text()

        # Normalize whitespace: strip each line, split on double spaces,
        # and keep only the non-empty fragments, one per output line.
        fragments = []
        for raw_line in raw_text.splitlines():
            for piece in raw_line.strip().split("  "):
                piece = piece.strip()
                if piece:
                    fragments.append(piece)
        cleaned_text = '\n'.join(fragments)

        # Write a small provenance header followed by the cleaned text.
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(f"网页URL: {url}\n")
            f.write(f"获取时间: {response.headers.get('Date', '未知')}\n")
            f.write("="*50 + "\n\n")
            f.write(cleaned_text)

        print(f"内容已成功保存到 {output_file}")
        return True

    except requests.exceptions.RequestException as e:
        print(f"网络请求错误: {e}")
        return False
    except Exception as e:
        print(f"处理过程中出现错误: {e}")
        return False


import pandas as pd
import re


def extract_weather_data(text):
    """Extract per-city weather rows from the saved forecast text.

    Parameters:
    text (str): full text of the saved forecast file (as produced by
        get_webpage_text).

    Returns:
    pd.DataFrame: one row per city with columns
        城市 / 天气 / 最高温度(℃) / 最低温度(℃) / 温度范围;
        an empty DataFrame when no "热门城市" section is found.
    """
    # Isolate the "hot cities" section; DOTALL lets '.' span newlines,
    # and the lookahead stops at "网页结束" or end-of-string.
    section_match = re.search(r'热门城市(.*?)(?=网页结束|$)', text, re.DOTALL)
    if not section_match:
        return pd.DataFrame()

    cities_section = section_match.group(1)

    # Layout per city: CJK city name on one line, weather description on
    # the next, then "high℃ / low℃". The '-?' accepts sub-zero winter
    # temperatures — the previous pattern (\d+) silently dropped any city
    # whose high or low was negative.
    weather_pattern = r'([\u4e00-\u9fff]+?)\s*[\n\r]\s*([^\d]+?)\s*(-?\d+)℃\s*/\s*(-?\d+)℃'

    rows = []
    for city, weather, temp_high, temp_low in re.findall(weather_pattern, cities_section):
        rows.append({
            '城市': city.strip(),
            '天气': weather.strip(),
            '最高温度(℃)': int(temp_high),
            '最低温度(℃)': int(temp_low),
            '温度范围': f"{temp_high}℃/{temp_low}℃"
        })

    return pd.DataFrame(rows)


def save_weather_data_to_txt(df, filename='城市天气数据.txt', source_text=None):
    """Write the extracted weather DataFrame to a formatted txt report.

    Parameters:
    df (pd.DataFrame): weather rows as produced by extract_weather_data.
    filename (str): output path for the report.
    source_text (str | None): raw forecast text whose header contains
        "网页URL: ..." and "获取时间: ..." lines; when None, falls back to
        the module-level `text_content` global for backward compatibility.

    Returns:
    bool: True on success, False when df is empty or writing fails.
    """
    if df.empty:
        print("未提取到天气数据")
        return False

    # Previously this function read the `text_content` global directly and
    # the whole save failed (NameError/IndexError) when it was missing or
    # malformed. Parse the provenance header defensively instead.
    if source_text is None:
        source_text = globals().get('text_content', '') or ''
    url_match = re.search(r'网页URL: (\S+)', source_text)
    time_match = re.search(r'获取时间: (\S+)', source_text)

    try:
        lines = [
            "城市天气数据报告",
            "=" * 50,
            f"数据来源: {url_match.group(1) if url_match else '未知'}",
            f"获取时间: {time_match.group(1) if time_match else '未知'}",
            "=" * 50,
            "",
            "序号\t城市\t天气\t最高温度\t最低温度\t温度范围",
            "-" * 60,
        ]

        # One tab-separated row per city, numbered from 1.
        for i, row in df.iterrows():
            lines.append(f"{i + 1}\t{row['城市']}\t{row['天气']}\t{row['最高温度(℃)']}℃\t{row['最低温度(℃)']}℃\t{row['温度范围']}")

        lines.append("")
        lines.append("=" * 50)
        lines.append(f"总计: {len(df)} 个城市的天气数据")

        with open(filename, 'w', encoding='utf-8') as f:
            f.write('\n'.join(lines) + '\n')

        # Bug fix: this message previously printed the literal "(unknown)"
        # instead of the actual output path.
        print(f"天气数据已保存到 {filename}")
        return True

    except Exception as e:
        print(f"保存文件时出错: {e}")
        return False



if __name__ == "__main__":
    url = "https://www.nmc.cn/publish/forecast.html"
    output_filename = "中央气象台天气预报.txt"

    success = get_webpage_text(url, output_filename)

    if not success:
        print("网页内容获取失败")
    else:
        # Preview the first 500 characters of the saved file.
        try:
            with open(output_filename, 'r', encoding='utf-8') as f:
                content = f.read(500)
                print("\n文件内容预览:")
                print("="*30)
                print(content)
                print("..." if len(content) >= 500 else "")
        except OSError:
            # Narrowed from a bare `except:` — only file-access errors are
            # a best-effort concern here.
            print("无法读取生成的文件")

        # Bug fix: the file was previously re-read unconditionally (and
        # without a context manager), crashing with FileNotFoundError when
        # the download had failed. Read it only on success, via `with`.
        with open(output_filename, 'r', encoding='utf-8') as f:
            text_content = f.read()

        # Extract the per-city weather table from the saved text.
        weather_df = extract_weather_data(text_content)

        if not weather_df.empty:
            print("提取到的天气数据:")
            print("=" * 50)
            print(weather_df.to_string(index=False))

            # Date-stamp the report filename (YYYYMMDD).
            date_stamp = datetime.datetime.now().strftime("%Y%m%d")
            save_weather_data_to_txt(weather_df, f'{date_stamp}-城市天气数据报告.txt')

            # Summary statistics over the extracted temperatures.
            print(f"\n统计信息:")
            print(f"城市数量: {len(weather_df)}")
            print(f"最高温度范围: {weather_df['最高温度(℃)'].min()}℃ - {weather_df['最高温度(℃)'].max()}℃")
            print(f"最低温度范围: {weather_df['最低温度(℃)'].min()}℃ - {weather_df['最低温度(℃)'].max()}℃")
        else:
            print("未能从文本中提取到天气数据")