# scrape_eol_rank_table.py
import requests
from bs4 import BeautifulSoup
import pandas as pd

# Target page: EOL (eol.cn) university ranking tables.
URL = "https://www.eol.cn/e_html/gk/dxpm/index.shtml"
# Custom User-Agent: some sites reject the default python-requests UA.
HEADERS = {
    "User-Agent": "Mozilla/5.0 (compatible; scraper/1.0; +https://example.com)"
}

def fetch_page(url):
    """Fetch *url* and return its HTML body as text.

    Raises:
        requests.HTTPError: if the response status is not 2xx.
    """
    r = requests.get(url, headers=HEADERS, timeout=15)
    # Fail fast on HTTP errors before spending time sniffing the
    # encoding of an error body (original code set encoding first).
    r.raise_for_status()
    # The site may not declare its charset correctly; let requests
    # detect the actual encoding from the content.
    r.encoding = r.apparent_encoding
    return r.text

# def parse_rankings(html):
#     soup = BeautifulSoup(html, "html.parser")
#     records = []
#
#     # 找所有 <tr> 里面包含 <td> 的行
#     for tr in soup.find_all("tr"):
#         tds = tr.find_all("td")
#         if len(tds) >= 3:
#             try:
#                 rank = int(tds[0].get_text(strip=True))
#                 name = tds[1].get_text(strip=True)
#                 score = float(tds[2].get_text(strip=True))
#                 records.append({"rank": rank, "name": name, "score": score})
#             except ValueError:
#                 # 遇到表头或异常行跳过
#                 continue
#     return sorted(records, key=lambda x: x["rank"])

# def parse_rankings(html):
#     soup = BeautifulSoup(html, "html.parser")
#
#     # 页面上综合排名表格 class 名一般是 "bk" 或者你可以直接取第一个 <table>
#     table = soup.find("table")
#     records = []
#
#     for tr in table.find_all("tr"):
#         tds = tr.find_all("td")
#         if len(tds) >= 3:
#             try:
#                 rank = int(tds[0].get_text(strip=True))
#                 name = tds[1].get_text(strip=True)
#                 score = float(tds[2].get_text(strip=True))
#                 records.append({"rank": rank, "name": name, "score": score})
#             except ValueError:
#                 continue
#
#     return sorted(records, key=lambda x: x["rank"])
# def parse_rankings(html):
#     soup = BeautifulSoup(html, "html.parser")
#
#     tables = soup.find_all("table")
#     all_records = []
#
#     for idx, table in enumerate(tables, start=1):
#         records = []
#         for tr in table.find_all("tr"):
#             tds = tr.find_all("td")
#             if len(tds) >= 2:
#                 rank_text = tds[0].get_text(strip=True)
#                 name = tds[1].get_text(strip=True)
#                 score = None
#                 if len(tds) >= 3:
#                     score_text = tds[2].get_text(strip=True)
#                     score = float(score_text) if score_text else None
#
#                 if rank_text and name:
#                     records.append({
#                         "rank": rank_text,   # 这里保留字符串，不强转 int
#                         "name": name,
#                         "score": score
#                     })
#         if records:
#             all_records.append((f"榜单{idx}", records))
#
#     return all_records


def safe_float(text: str):
    """Convert *text* to a float; return None when it is not numeric
    (including empty strings and None)."""
    try:
        value = float(text)
    except (TypeError, ValueError):
        return None
    return value

def parse_rankings(html):
    """Extract ranking rows from every <table> in *html*.

    Returns a list of ``(label, rows)`` tuples, one per table that
    yielded at least one data row. Each row is a dict with keys
    ``rank`` (kept as a string, e.g. "医77+"), ``name``, and ``score``
    (float or None).
    """
    soup = BeautifulSoup(html, "html.parser")
    results = []

    for table_no, table in enumerate(soup.find_all("table"), start=1):
        rows = []
        for row in table.find_all("tr"):
            cells = row.find_all("td")
            if len(cells) < 2:
                continue

            rank_label = cells[0].get_text(strip=True)
            school = cells[1].get_text(strip=True)
            # Third column (the score) is optional and may be non-numeric.
            score_value = (
                safe_float(cells[2].get_text(strip=True))
                if len(cells) >= 3
                else None
            )

            # Skip blank rows and the header row (first cell "排名").
            if not rank_label or not school or rank_label in ("排名",):
                continue

            rows.append({
                "rank": rank_label,
                "name": school,
                "score": score_value,
            })

        if rows:
            results.append((f"榜单{table_no}", rows))

    return results


# def main():
#     html = fetch_page(URL)
#     records = parse_rankings(html)
#
#     if not records:
#         print("⚠️ 没抓到数据，请检查 DOM 结构或网站是否屏蔽请求。")
#         return
#
#     # 输出前 10 条
#     print("示例数据：")
#     for r in records[:10]:
#         print(r)
#
#     # 保存为 CSV/Excel
#     df = pd.DataFrame(records)
#     df.to_csv("eol_universities.csv", index=False, encoding="utf-8-sig")
#     df.to_excel("eol_universities.xlsx", index=False)
#     print(f"✅ 抓取完成，共 {len(records)} 条，已保存到 eol_universities.csv / eol_universities.xlsx")


def main():
    """Download the EOL ranking page and export every table to Excel."""
    all_records = parse_rankings(fetch_page(URL))

    if not all_records:
        print("⚠️ 没抓到数据")
        return

    # One worksheet per ranking table (not CSV — a single CSV cannot
    # hold multiple tables).
    with pd.ExcelWriter("eol_universities.xlsx") as writer:
        for sheet_name, records in all_records:
            frame = pd.DataFrame(records)
            frame.to_excel(writer, sheet_name=sheet_name, index=False)

    print(f"✅ 共抓取 {len(all_records)} 个榜单，已保存到 eol_universities.xlsx")

# Run the scraper only when executed as a script, not on import.
if __name__ == "__main__":
    main()
