# scrape_eol_rank_table.py
import requests
from bs4 import BeautifulSoup
import pandas as pd

# Target page: EOL (eol.cn) university-ranking table (Chinese education portal).
URL = "https://www.eol.cn/e_html/gk/dxpm/index.shtml"
# Send a browser-like User-Agent so the site does not reject the default
# "python-requests" identifier outright.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (compatible; scraper/1.0; +https://example.com)"
}

def fetch_page(url):
    """Download *url* and return its HTML as correctly-decoded text.

    Raises:
        requests.HTTPError: on a non-2xx response (via raise_for_status).
        requests.RequestException: on connection errors or the 15s timeout.
    """
    r = requests.get(url, headers=HEADERS, timeout=15)
    # Fail fast on HTTP errors before spending time sniffing the encoding
    # of what would only be an error page.
    r.raise_for_status()
    # The site's charset header is not reliable; let requests detect the
    # encoding from the response body instead.
    r.encoding = r.apparent_encoding
    return r.text

# NOTE: an earlier version of parse_rankings scanned every <tr> in the whole
# page rather than scoping the search to the first <table>; it was superseded
# by the implementation below, which restricts parsing to the ranking table.

def parse_rankings(html):
    """Parse ranking rows out of the first <table> found in *html*.

    Each qualifying row (>= 3 <td> cells) yields a dict with keys
    "rank" (int), "name" (str) and "score" (float). Rows whose rank or
    score cells fail numeric conversion (e.g. header rows) are skipped.

    Returns:
        list[dict]: records sorted ascending by rank; [] when the page
        contains no <table> at all (e.g. after a site redesign or a
        blocked request), so callers never hit an AttributeError.
    """
    soup = BeautifulSoup(html, "html.parser")

    # The ranking table is assumed to be the first <table> on the page —
    # TODO confirm against the live DOM if results come back empty.
    table = soup.find("table")
    if table is None:
        return []

    records = []
    for tr in table.find_all("tr"):
        tds = tr.find_all("td")
        if len(tds) < 3:
            continue
        try:
            records.append({
                "rank": int(tds[0].get_text(strip=True)),
                "name": tds[1].get_text(strip=True),
                "score": float(tds[2].get_text(strip=True)),
            })
        except ValueError:
            # Header rows / malformed cells: skip rather than abort.
            continue

    return sorted(records, key=lambda x: x["rank"])

def main():
    """Fetch the EOL ranking page, parse it, preview it, and export files.

    Writes eol_universities.csv (UTF-8 with BOM so Excel renders the
    Chinese names correctly) and eol_universities.xlsx to the current
    directory. Prints a warning and exits early if no rows were parsed.
    """
    records = parse_rankings(fetch_page(URL))

    if not records:
        print("⚠️ 没抓到数据，请检查 DOM 结构或网站是否屏蔽请求。")
        return

    # Quick sanity check: show the first few parsed rows.
    print("示例数据：")
    for row in records[:10]:
        print(row)

    # Persist both formats from a single DataFrame.
    frame = pd.DataFrame(records)
    frame.to_csv("eol_universities.csv", index=False, encoding="utf-8-sig")
    frame.to_excel("eol_universities.xlsx", index=False)
    print(f"✅ 抓取完成，共 {len(records)} 条，已保存到 eol_universities.csv / eol_universities.xlsx")

if __name__ == "__main__":
    main()
