import requests
from bs4 import BeautifulSoup
import pandas as pd
import os
import time
import re
from urllib.parse import urljoin, urlparse

# Configuration: ranking landing page and a desktop browser User-Agent so the
# site serves the normal HTML instead of blocking the default requests UA.
BASE_URL = "https://www.shanghairanking.cn/rankings/bcur/2025"
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
}

# Directory where downloaded university logo images are saved.
IMAGE_DIR = "university_logos"
os.makedirs(IMAGE_DIR, exist_ok=True)


def fetch_page(url):
    """Fetch *url* and return its decoded HTML text, or None on any failure."""
    try:
        resp = requests.get(url, headers=HEADERS, timeout=10)
        resp.raise_for_status()
        # Let requests sniff the charset so Chinese text decodes correctly.
        resp.encoding = resp.apparent_encoding
    except Exception as e:
        print(f"获取页面失败 {url}: {e}")
        return None
    else:
        return resp.text


def download_image(img_url, filename):
    """Download an image into IMAGE_DIR and return the local path.

    Args:
        img_url: Absolute, protocol-relative ("//...") or site-relative
            ("/...") image URL; relative forms are resolved against BASE_URL.
        filename: File name to store the image under inside IMAGE_DIR.

    Returns:
        The local file path on success, or None on any failure.
    """
    try:
        # Normalize the URL to a fully-qualified https URL.
        if img_url.startswith("//"):
            img_url = "https:" + img_url
        elif img_url.startswith("/"):
            img_url = urljoin(BASE_URL, img_url)

        response = requests.get(img_url, headers=HEADERS, timeout=10)
        response.raise_for_status()

        # Save the raw bytes to disk.
        filepath = os.path.join(IMAGE_DIR, filename)
        with open(filepath, 'wb') as f:
            f.write(response.content)
        # Fix: previously printed the literal placeholder "(unknown)" instead
        # of the actual file name.
        print(f"已下载图片: {filename}")
        return filepath
    except Exception as e:
        print(f"下载图片失败 {img_url}: {e}")
        return None


def parse_page(html, page_num):
    """Parse one ranking page and extract its university records.

    Args:
        html: Raw HTML text of a ranking page.
        page_num: 1-based page number, used only to build a fallback logo
            filename when the logo URL carries no usable basename.

    Returns:
        A list of dicts keyed by the Chinese column names used for the Excel
        export; an empty list when the expected table structure is absent.
    """
    soup = BeautifulSoup(html, 'html.parser')
    universities = []

    # Locate the ranking table; also guard against a table with no <tbody>,
    # which previously raised AttributeError and aborted the whole page.
    table = soup.find('table', class_='rk-table')
    tbody = table.find('tbody') if table else None
    if not tbody:
        print("未找到排名表格")
        return universities

    rows = tbody.find_all('tr')

    for row in rows:
        cols = row.find_all('td')
        # Skip malformed rows (e.g. header/spacer rows) with too few cells.
        if len(cols) >= 5:
            try:
                # Rank (first column).
                rank = cols[0].get_text(strip=True)

                # School name and logo live in the second column.
                name_col = cols[1]
                school_name = ""
                logo_url = ""

                # The site uses a few different markup variants for the name;
                # try the most specific container first, then fall back.
                univ_name_container = name_col.find(class_='univ-name')
                if univ_name_container:
                    school_name = univ_name_container.get_text(strip=True)
                else:
                    univ_link = name_col.find(class_='univ-link')
                    if univ_link:
                        school_name = univ_link.get_text(strip=True)
                    else:
                        school_name = name_col.get_text(strip=True)

                # Logo URL, when present.
                logo_img = name_col.find('img', class_='univ-logo')
                if logo_img and logo_img.get('src'):
                    logo_url = logo_img['src']

                # Remaining fixed columns: region, region rank, total score.
                country = cols[2].get_text(strip=True)
                country_rank = cols[3].get_text(strip=True)
                total_score = cols[4].get_text(strip=True)

                # Optional sixth column: indicator score.
                indicator_score = ""
                if len(cols) > 5:
                    indicator_score = cols[5].get_text(strip=True)

                # Download the logo; download_image returns None on failure.
                local_logo_path = ""
                if logo_url:
                    parsed_url = urlparse(logo_url)
                    logo_filename = os.path.basename(parsed_url.path)
                    if not logo_filename:
                        # No basename in the URL: derive a filesystem-safe
                        # name from the school name and page number.
                        safe_school_name = re.sub(r'[<>:"/\\|?*\x00-\x1F]', '_', school_name)
                        logo_filename = f"{safe_school_name}_page{page_num}.png"

                    local_logo_path = download_image(logo_url, logo_filename)

                universities.append({
                    '排名': rank,
                    '学校名称': school_name,
                    '国家/地区': country,
                    '国家/地区排名': country_rank,
                    '总分': total_score,
                    '指标得分': indicator_score,
                    'Logo路径': local_logo_path if local_logo_path else "无"
                })

            except Exception as e:
                # A single bad row should not abort the rest of the page.
                print(f"解析行数据时出错: {e}")
                continue

    return universities


def get_total_pages(html):
    """Return the total page count read from the Ant Design pagination widget."""
    soup = BeautifulSoup(html, 'html.parser')
    pagination = soup.find('ul', class_='ant-pagination')
    if pagination:
        # Page numbers are carried in the `title` attribute of each
        # ant-pagination-item-<n> list item; take the largest one.
        items = pagination.find_all('li', class_=re.compile(r'ant-pagination-item-\d+'))
        numbers = [
            int(item.get('title'))
            for item in items
            if item.get('title') and item.get('title').isdigit()
        ]
        if numbers:
            return max(numbers)
    # Fallback: 34 pages, as observed in a sample of the site's HTML.
    return 34


def main():
    """Crawl every page of the 2025 ranking and export the data to Excel."""
    print("开始抓取软科中国大学排名2025数据...")

    # The first page doubles as the source for the total page count.
    first_html = fetch_page(BASE_URL)
    if not first_html:
        print("无法获取首页内容")
        return

    total_pages = get_total_pages(first_html)
    print(f"检测到总页数: {total_pages}")

    records = parse_page(first_html, 1)
    print(f"第1页数据抓取完成，获取到 {len(records)} 条记录")

    # Walk the remaining pages; failures are logged and skipped.
    for page in range(2, total_pages + 1):
        page_url = f"https://www.shanghairanking.cn/rankings/bcur/2025/{page}"
        print(f"正在抓取第 {page} 页...")

        page_html = fetch_page(page_url)
        if page_html:
            page_records = parse_page(page_html, page)
            records.extend(page_records)
            print(f"第 {page} 页数据抓取完成，获取到 {len(page_records)} 条记录")
        else:
            print(f"第 {page} 页抓取失败")

        # Throttle so we do not hammer the server.
        time.sleep(1)

    if not records:
        print("未获取到任何数据")
        return

    # Export everything collected to a single Excel workbook.
    excel_filename = "软科中国大学排名2025.xlsx"
    pd.DataFrame(records).to_excel(excel_filename, index=False)
    print(f"\n数据已保存到 {excel_filename}，共 {len(records)} 条记录")
    print(f"学校Logo已保存到 {IMAGE_DIR} 目录")


# Run the crawler only when executed as a script, not when imported.
if __name__ == "__main__":
    main()