# -*- coding: utf-8 -*-
"""
双一流高校一流学科数据爬取与处理脚本
爬取网址: https://gaokao.eol.cn/e_html/gk/2022/sylyx/index.html
功能: 爬取双一流高校及其一流学科建设名单，保存为CSV和Excel格式
"""

import csv
import re
import time
from collections import Counter

import openpyxl
import requests
from bs4 import BeautifulSoup


def _extract_score(td):
    """Return the score text of a cell, preferring anchor text when the
    cell wraps the score in a link."""
    link = td.find("a")
    return link.get_text(strip=True) if link else td.get_text(strip=True)


def _parse_table(table):
    """Parse the results <table> into rows of [region, school, subjects, score].

    The source table uses a rowspan-style layout: the region cell appears
    only on the first row of each region, so its value must be carried
    forward for the shorter (2-column) continuation rows.

    Args:
        table: a BeautifulSoup <table> tag.

    Returns:
        list[list[str]]: parsed data rows.
    """
    data = []
    current_region = None  # last region seen; inherited by 2-column rows

    for tr in table.find_all("tr"):
        tds = tr.find_all("td")
        if not tds:
            continue

        # Skip the header row ("地区 / 学校 / ...").
        if "地区" in tds[0].get_text():
            continue

        if len(tds) == 4:
            region = tds[0].get_text(strip=True)
            school = tds[1].get_text(strip=True)
            subjects = tds[2].get_text(strip=True)
            score = _extract_score(tds[3])
            if region:
                # BUG FIX: remember the region so that following 2-column
                # (rowspan-continuation) rows inherit it. Previously
                # current_region was never assigned, so those rows were
                # silently dropped.
                current_region = region
            if region and school:
                data.append([region, school, subjects, score])

        elif len(tds) == 3:
            region = tds[0].get_text(strip=True)
            school = tds[1].get_text(strip=True)
            subjects = tds[2].get_text(strip=True)
            if region:
                current_region = region
            if region and school:
                data.append([region, school, subjects, ""])

        elif len(tds) == 2 and current_region:
            # Continuation row: no region cell, reuse the last one seen.
            school = tds[0].get_text(strip=True)
            subjects = tds[1].get_text(strip=True)
            if school:
                data.append([current_region, school, subjects, ""])

    return data


def crawl_syl_universities():
    """Crawl the double-first-class university / discipline table.

    Fetches the page, locates the first <table>, and parses it with
    _parse_table.

    Returns:
        list[list[str]]: rows of [region, school, subjects, score];
        an empty list on any failure (network error, missing table).
    """
    # Browser-like User-Agent so the site does not reject the request.
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
                      "AppleWebKit/537.36 (KHTML, like Gecko) "
                      "Chrome/122.0.0.0 Safari/537.36"
    }
    url = "https://gaokao.eol.cn/e_html/gk/2022/sylyx/index.html"

    try:
        print("正在获取数据...")
        resp = requests.get(url, headers=headers, timeout=15)
        # Let requests guess the real encoding (the page may not be UTF-8).
        resp.encoding = resp.apparent_encoding

        print("数据获取成功，开始解析...")
        soup = BeautifulSoup(resp.text, "html.parser")
        table = soup.find("table")
        if not table:
            print("未找到数据表格")
            return []

        data = _parse_table(table)
        print(f"解析完成，共获取 {len(data)} 条记录")
        return data

    except Exception as e:
        # Broad catch is intentional: this is a best-effort CLI scraper
        # that reports the error and returns an empty result.
        print(f"爬取过程中出现错误: {e}")
        return []


def save_to_csv(data, filename="syl_universities.csv"):
    """Save data rows to a CSV file with a header row.

    Args:
        data: rows of [region, school, subjects, score].
        filename: output path; written as utf-8-sig so Excel
            auto-detects the encoding via the BOM.
    """
    try:
        with open(filename, "w", newline="", encoding="utf-8-sig") as f:
            writer = csv.writer(f)
            writer.writerow(["地区", "学校名称", "一流学科建设名单", "投档线"])
            writer.writerows(data)
        # BUG FIX: previously printed the literal "(unknown)" instead of
        # the actual output path.
        print(f"✅ CSV文件已保存: {filename}")
    except Exception as e:
        print(f"保存CSV文件时出现错误: {e}")

def save_to_excel(data, filename="syl_universities.xlsx"):
    """Save data rows to an Excel workbook with a header row.

    Args:
        data: rows of [region, school, subjects, score].
        filename: output .xlsx path.
    """
    try:
        wb = openpyxl.Workbook()
        ws = wb.active
        ws.title = "双一流高校一流学科"

        # BUG FIX: the header said "序号" (serial number) but the first
        # column of every data row is the region — align it with the CSV
        # header and the actual row layout.
        ws.append(["地区", "学校名称", "一流学科建设名单", "投档线"])

        for row in data:
            ws.append(row)

        # Widen the columns for readability.
        for col, width in zip("ABCD", (10, 25, 30, 40)):
            ws.column_dimensions[col].width = width

        wb.save(filename)
        # BUG FIX: previously printed the literal "(unknown)" instead of
        # the actual output path.
        print(f"✅ Excel文件已保存: {filename}")
    except Exception as e:
        print(f"保存Excel文件时出现错误: {e}")


def display_statistics(data):
    """Print summary statistics: total row count and per-region counts.

    Args:
        data: rows of [region, school, subjects, score]; only the region
            (first element) is used for grouping.
    """
    if not data:
        print("没有数据可显示")
        return

    # Counter replaces the hand-rolled dict-increment loop.
    region_count = Counter(row[0] for row in data)

    print("\n📊 数据统计:")
    print(f"总计: {len(data)} 所学校")
    print("\n各地区学校数量:")
    for region, count in sorted(region_count.items()):
        # Strip any parenthesised suffix from the region label for display.
        region_name = re.sub(r'\(.*?\)', '', region)
        print(f"  {region_name}: {count} 所")


def main():
    """Entry point: crawl the data, summarize it, save it, and preview
    the first few records."""
    print("🎓 双一流高校一流学科数据爬取工具")
    print("=" * 50)

    data = crawl_syl_universities()
    if not data:
        # Nothing to report or save — bail out early.
        print("❌ 未能获取有效数据")
        return

    display_statistics(data)

    save_to_csv(data)
    save_to_excel(data)

    # Preview up to five records.
    print("\n📋 数据示例:")
    print("-" * 50)
    for idx, (region, school, subjects, score) in enumerate(data[:5], start=1):
        print(f"{idx}. {region} - {school}")
        print(f"   学科: {subjects}")
        if score:
            print(f"   投档线: {score}")
        print()


if __name__ == "__main__":
    main()
