# -*- coding: utf-8 -*-
import requests
import re
import sqlite3
import json

class CollegeRankingCrawler:
    """Crawler for the 2024 ShanghaiRanking "Best Chinese Universities" list.

    Tries several endpoints (JSON API, HTML page, Nuxt static payload) in
    order, parses whichever responds (JSON first, regex fallback), and
    caches the resulting rows in a local SQLite database.
    """

    def __init__(self, db_path='college_data.db'):
        # Path of the SQLite cache file. Parameterized so callers/tests can
        # redirect it; the default preserves the original behavior.
        self.db_path = db_path
        # Candidate endpoints, tried in order until one yields data.
        self.urls = [
            'https://www.shanghairanking.cn/api/pub/v1/bcur/2024',  # JSON API
            'https://www.shanghairanking.cn/rankings/bcur/2024',    # HTML page
            'https://www.shanghairanking.cn/_nuxt/static/rankings/bcur/2024/payload.js'  # static payload
        ]
        # Browser-like headers to lower the chance of being blocked.
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "Referer": "https://www.shanghairanking.cn/",
            "Accept": "application/json, text/plain, */*"
        }

    def init_database(self):
        """Create the college_info table if it does not already exist."""
        conn = sqlite3.connect(self.db_path)
        try:
            with conn:  # transaction scope: commits the DDL on success
                conn.execute('''
                    CREATE TABLE IF NOT EXISTS college_info (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        rank INTEGER NOT NULL,
                        name VARCHAR(50) NOT NULL,
                        province VARCHAR(20),
                        category VARCHAR(20),
                        score DECIMAL(10,1),
                        UNIQUE(name)
                    )
                ''')
        finally:
            # sqlite3's context manager only commits/rolls back; it does NOT
            # close the connection, so close explicitly to avoid a leak.
            conn.close()

    def save_to_db(self, data):
        """Insert one (rank, name, province, category, score) tuple.

        Rows with an already-stored name are silently skipped via
        INSERT OR IGNORE (UNIQUE(name) constraint), making re-runs idempotent.
        """
        conn = sqlite3.connect(self.db_path)
        try:
            with conn:  # commit the insert
                conn.execute(
                    'INSERT OR IGNORE INTO college_info (rank, name, province, category, score) VALUES (?, ?, ?, ?, ?)',
                    data
                )
        finally:
            conn.close()

    def try_different_urls(self):
        """Try each configured URL in turn; return parsed rows or None.

        A URL whose response cannot be parsed no longer aborts the loop —
        the next candidate is tried instead.
        """
        for url in self.urls:
            try:
                print(f"尝试URL: {url}")
                response = requests.get(url=url, headers=self.headers, timeout=10)
                response.raise_for_status()

                if not response.text.strip():
                    continue  # empty body: nothing to parse

                try:
                    payload = response.json()
                except ValueError:
                    # Not JSON. ValueError is the common base of the
                    # JSONDecodeError variants requests may raise; fall back
                    # to regex extraction on the raw text.
                    result = self.parse_with_regex(response.text)
                else:
                    result = self.parse_api_data(payload) if payload and 'data' in payload else None

                if result:
                    return result

            except Exception as e:
                print(f"URL {url} 失败: {e}")
                continue

        return None

    def parse_api_data(self, data):
        """Flatten an API-style payload into row tuples.

        Accepts ``data['data']`` as either a dict holding a ``rankings``
        list or the list of records itself. Returns a list of
        (rank, name, province, category, score) string tuples, or None on
        an unexpected payload shape.
        """
        try:
            payload = data.get('data', {})
            # 'data' may be {'rankings': [...]} or directly a list of records.
            if isinstance(payload, dict):
                items = payload.get('rankings', [])
            else:
                items = payload or []

            college_info = []
            for item in items:
                college_info.append((
                    str(item.get('ranking', '')),
                    item.get('univNameCn', ''),
                    item.get('province', ''),
                    item.get('univCategory', ''),
                    str(item.get('score', ''))
                ))
            return college_info
        except Exception as e:
            print(f"解析API数据失败: {e}")
            return None

    def parse_with_regex(self, text):
        """Extract ranking rows from raw text (payload.js / HTML) via regex.

        First tries to isolate an embedded Nuxt JSON blob; if that blob is
        absent or is a JS object that strict JSON cannot parse, falls back
        to matching the individual fields. Returns a list of row tuples, or
        None when nothing matched.
        """
        try:
            # Look for a Nuxt-embedded JSON blob first.
            json_pattern = r'__NUXT_JSONP__\([^)]+\)|window\.__NUXT__\s*=\s*({[^;]+})'
            match = re.search(json_pattern, text, re.DOTALL)

            if match:
                json_str = match.group(1) if match.group(1) else match.group(0)
                # Trim anything before the first '{' and after the last '}'.
                json_str = re.sub(r'^[^{]*', '', json_str)
                json_str = re.sub(r'[^}]*$', '', json_str)
                try:
                    data = json.loads(json_str)
                except json.JSONDecodeError:
                    # JS-style object, not strict JSON. Previously this
                    # exception skipped the field-level fallback entirely.
                    pass
                else:
                    result = self.parse_api_data(data)
                    if result:
                        return result

            # Field-level fallback: collect each column independently.
            patterns = {
                'rank': r'"ranking":\s*"([^"]+)"',
                'name': r'"univNameCn":\s*"([^"]+)"',
                'province': r'"province":\s*"([^"]+)"',
                'category': r'"univCategory":\s*"([^"]+)"',
                'score': r'"score":\s*"([^"]+)"'
            }
            extracted_data = {key: re.findall(pattern, text)
                              for key, pattern in patterns.items()}

            # Bail out when no field matched at all (the old min() call
            # raised ValueError on an empty sequence here).
            lengths = [len(v) for v in extracted_data.values() if v]
            if not lengths:
                return None
            min_length = min(lengths)

            # Columns may differ in length; pad missing cells with ''.
            field_order = ('rank', 'name', 'province', 'category', 'score')
            college_info = []
            for i in range(min_length):
                college_info.append(tuple(
                    extracted_data[key][i] if i < len(extracted_data[key]) else ''
                    for key in field_order
                ))
            return college_info

        except Exception as e:
            print(f"正则解析失败: {e}")
            return None

    def show_db_data(self):
        """Print all cached rows ordered by rank; return the row count."""
        print("存储在数据库的数据：")
        conn = sqlite3.connect(self.db_path)
        try:
            cursor = conn.cursor()
            cursor.execute('SELECT rank, name, province, category, score FROM college_info ORDER BY rank')
            rows = cursor.fetchall()
        finally:
            conn.close()

        self._print_table(rows)
        return len(rows)

    def _print_table(self, rows=None):
        """Print rows as a fixed-width table (header, then data or placeholder)."""
        header = f"{'排名':<5}{'学校':<25}{'省市':<10}{'类型':<8}{'总分':<8}"
        print(header)
        print("-" * 65)

        if rows:
            for row in rows:
                print(f"{str(row[0]):<5}{row[1]:<25}{row[2]:<10}{row[3]:<8}{str(row[4]):<8}")
        else:
            print("暂无数据")

    def run(self):
        """Crawl, persist, then echo what the database holds."""
        self.init_database()

        print("正在爬取大学排名数据...")
        college_data = self.try_different_urls()

        if college_data:
            print("\n爬取结果：")
            self._print_table(college_data)

            # Persist row by row; INSERT OR IGNORE dedupes across re-runs.
            for data in college_data:
                self.save_to_db(data)

            total_count = len(college_data)
            print(f"\n成功爬取并保存 {total_count} 条数据")
        else:
            print("未能获取到数据，可能的原因：")
            print("1. 网站结构已更改")
            print("2. 需要更新URL或解析方法")
            print("3. 网络连接问题")
            print("4. 需要处理反爬虫机制")

        # Show the stored data (may include rows from earlier runs).
        db_count = self.show_db_data()
        print(f"\n共处理 {db_count} 条数据")

if __name__ == "__main__":
    # Entry point: run the crawl-and-store pipeline when executed directly.
    CollegeRankingCrawler().run()