from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException, TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

import pandas as pd
from bs4 import BeautifulSoup

# Keep the Chrome window open after the WebDriver session detaches.
chrome_options = webdriver.ChromeOptions()
chrome_options.add_experimental_option("detach", True)

# 2023 Best Chinese Universities Ranking (ShanghaiRanking).
url = "https://www.shanghairanking.cn/rankings/bcur/2023"
browser = webdriver.Chrome(options=chrome_options)

try:
    browser.get(url)
    # Explicit wait until the ranking table has rendered before scraping.
    WebDriverWait(browser, 10).until(
        EC.presence_of_element_located((By.CSS_SELECTOR, 'table'))
    )

    contents = []  # one [rank, name, province, type, score, tier] row per school
    page = 1

    while page <= 20:
        print(f"开始爬取第 {page} 页")
        html = browser.page_source
        soup = BeautifulSoup(html, "html.parser")
        for tr in soup.find('tbody').find_all('tr'):
            tds = tr.find_all('td')
            # Guard against malformed rows: a missing name element or a
            # short cell list would otherwise raise and abort the scrape.
            name_el = tr.find(class_="name-cn")
            if name_el is None or len(tds) < 6:
                continue
            contents.append([
                tds[0].text.strip(),
                name_el.text.strip(),
                tds[2].text.strip(),
                tds[3].text.strip(),
                tds[4].text.strip(),
                tds[5].text.strip(),
            ])

        # BUG FIX: find_element raises NoSuchElementException, never
        # TimeoutException, so the original handler was dead code and a
        # missing "next" button crashed the whole run via the outer except.
        try:
            next_btn = browser.find_element(By.CSS_SELECTOR, 'li.ant-pagination-next>a')
            first_row = browser.find_element(By.CSS_SELECTOR, 'tbody tr')
            next_btn.click()
            # Wait for the old first row to go stale so the next iteration
            # doesn't re-scrape the same page before it re-renders.
            WebDriverWait(browser, 10).until(EC.staleness_of(first_row))
            page += 1
        except (NoSuchElementException, TimeoutException):
            print("找不到下一页，结束爬取。")
            break

    # Save the collected data to Excel.
    first_name = ["排名", "学校名称", "省市", "类型", "总分", "办学层次"]
    rank = pd.DataFrame(contents, columns=first_name)
    # Nullable Int64 tolerates NaN from failed coercions; plain .astype(int)
    # would raise on any unparseable rank cell.
    rank["排名"] = pd.to_numeric(rank["排名"], errors='coerce').astype("Int64")
    rank["总分"] = pd.to_numeric(rank["总分"], errors='coerce').astype(float)
    rank.to_excel("2023中国大学排名.xlsx", index=False)
    print("保存成功！")

except Exception as e:
    # Top-level boundary: report and fall through to cleanup.
    print(f"发生错误：{e}")

finally:
    # Always release the browser, even on failure.
    browser.quit()