import requests
from requests.exceptions import RequestException


def crawl_pxu_website(url='http://www.pxc.jx.cn/',
                      save_path=r'D:\萍乡学院.html',
                      timeout=10):
    """Download a web page and save its HTML text to a local file.

    Args:
        url: Page to fetch. Defaults to the Pingxiang University homepage.
        save_path: Destination file for the decoded HTML text.
        timeout: Seconds to wait for the server before giving up; prevents
            the request from hanging indefinitely on an unresponsive host.

    Returns:
        True if the page was fetched (HTTP 200) and written to disk,
        False on a non-200 status, a network error, or a file-write error.
    """
    # Spoof a desktop-browser User-Agent so the site does not reject
    # the request as coming from an automated client.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
    }

    try:
        # timeout is essential: requests.get without one can block forever.
        response = requests.get(url, headers=headers, timeout=timeout)

        if response.status_code == 200:
            # Prefer the encoding detected from the response body;
            # fall back to UTF-8 when detection yields nothing.
            response.encoding = response.apparent_encoding or 'utf-8'

            # Persist the decoded text as UTF-8 regardless of the
            # page's original encoding.
            with open(save_path, 'w', encoding='utf-8') as f:
                f.write(response.text)

            print(f"页面已成功保存到 {save_path}")
            return True
        else:
            print(f"请求失败，状态码: {response.status_code}")
            return False

    except RequestException as e:
        print(f"请求过程中发生错误: {e}")
        return False
    except OSError as e:
        # File-system failures (missing drive, permission denied, ...)
        # were previously uncaught and crashed the script.
        print(f"保存文件时发生错误: {e}")
        return False


if __name__ == '__main__':
    # Run the crawler only when executed as a script, not on import.
    crawl_pxu_website()