# scrape_eol_rank.py
import requests
from bs4 import BeautifulSoup
import re
import csv
import json
import pandas as pd

# Target page: the EOL (中国教育在线) university ranking index.
URL = "https://www.eol.cn/e_html/gk/dxpm/index.shtml"
# Identify the scraper politely; some hosts reject requests without a User-Agent.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (compatible; scraper/1.0; +https://example.com)"
}

def fetch_page(url):
    """Download *url* and return its decoded HTML text.

    Raises:
        requests.HTTPError: if the server responds with a 4xx/5xx status.
    """
    r = requests.get(url, headers=HEADERS, timeout=15)
    # Fail fast on HTTP errors BEFORE charset sniffing: apparent_encoding
    # scans the whole body, which is wasted work on an error response.
    r.raise_for_status()
    # The site does not reliably declare its charset; fall back to detection.
    r.encoding = r.apparent_encoding
    return r.text

def parse_rankings(html):
    """Extract university ranking records from the EOL page HTML.

    Args:
        html: raw page HTML as a string.

    Returns:
        A list of ``{"rank": int, "name": str, "score": float}`` dicts,
        de-duplicated by rank (first occurrence wins) and sorted by rank.
    """
    soup = BeautifulSoup(html, "html.parser")

    # First pass: read the page's visible text line by line and match
    # lines shaped like "rank  school-name  score".
    text = soup.get_text("\n")
    lines = [ln.strip() for ln in text.splitlines() if ln.strip()]

    records = []
    # rank (1-3 digits) + name (no digits) + score (integer or decimal)
    line_pat = re.compile(r'^\s*(\d{1,3})\s+([^\d\n]+?)\s+([0-9]+(?:\.[0-9]+)?)\s*$')

    for ln in lines:
        m = line_pat.match(ln)
        if m:
            records.append({
                "rank": int(m.group(1)),
                "name": m.group(2).strip(),
                "score": float(m.group(3)),
            })

    # Fallback: too few rows suggests a table/list layout where rank, name
    # and score live in separate nodes; scan the whole text blob loosely.
    if len(records) < 50:
        all_text = soup.get_text("|||")
        loose_pat = re.compile(r'(\d{1,3})\s+([^\d\|]{2,80})\s+([0-9]+(?:\.[0-9]+)?)')
        for rnk, nm, sc in loose_pat.findall(all_text):
            records.append({"rank": int(rnk), "name": nm.strip(), "score": float(sc)})

    # De-duplicate by rank unconditionally. (Previously this only happened
    # inside the fallback branch, so a successful first pass could return
    # duplicate ranks.) Sorting first makes "first occurrence wins"
    # deterministic: the lowest-positioned duplicate is kept.
    unique = {}
    for rec in sorted(records, key=lambda r: r["rank"]):
        unique.setdefault(rec["rank"], rec)
    return sorted(unique.values(), key=lambda r: r["rank"])

def save_csv(records, filename="eol_universities.csv"):
    """Write ranking records to *filename* as CSV.

    Uses utf-8-sig so Excel recognises the encoding via the BOM.
    """
    fieldnames = ["rank", "name", "score"]
    with open(filename, "w", newline="", encoding="utf-8-sig") as fh:
        writer = csv.DictWriter(fh, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(records)

def save_json(records, filename="eol_universities.json"):
    """Serialize ranking records to *filename* as pretty-printed UTF-8 JSON."""
    payload = json.dumps(records, ensure_ascii=False, indent=2)
    with open(filename, "w", encoding="utf-8") as fh:
        fh.write(payload)

def main():
    """Fetch, parse, and persist the EOL ranking data (CSV/JSON/Excel)."""
    records = parse_rankings(fetch_page(URL))
    print(f"抓取到 {len(records)} 条记录，示例：", records[:10])
    if not records:
        print("未抓到有效记录，请检查页面结构或网络请求是否被拦截。")
        return
    save_csv(records)
    save_json(records)
    # Also mirror the data into an Excel workbook via pandas (optional).
    pd.DataFrame(records).to_excel("eol_universities.xlsx", index=False)
    print("已保存：eol_universities.csv, eol_universities.json, eol_universities.xlsx")

# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    main()
