import requests
import pandas as pd
import os
import json
import time
import re

# Static API endpoint returning the full school list (JSON).
SCHOOL_INFO_URL = "https://static-data.gaokao.cn/www/2.0/school/list_v2.json?a=www.gaokao.cn"
# Per-school special-score endpoint; format slots are (school_id, year, province_id).
# NOTE(review): "SPECICAL" is a typo for "SPECIAL"; kept as-is because renaming
# would require updating every reference in the file.
SPECICAL_SCORE_URL_TEMPLATE = "https://static-data.gaokao.cn/www/2.0/schoolspecialscore/{0}/{1}/{2}.json?a=www.gaokao.cn"
# Province id used by the gaokao.cn API for Guangxi.
GUANGXI_ID = 45

def get_special_score_url(school_id, year, province_id):
    """Build the special-score API URL for one school, year and province."""
    return SPECICAL_SCORE_URL_TEMPLATE.format(school_id, year, province_id)

def get_all_schools():
    """Fetch the complete school list from the gaokao.cn static API.

    Returns:
        The ``data`` payload of the response when the API reports success
        (code '0000'), otherwise None.
    """
    # Without a timeout, a stalled connection would hang the whole crawl.
    response = requests.get(SCHOOL_INFO_URL, timeout=30)
    response.encoding = 'utf-8'
    data = response.json()
    # .get avoids a KeyError if the payload is malformed or missing 'code'.
    if data.get('code') == '0000':
        return data.get('data')
    return None

def sanitize_filename(name):
    """Replace characters that are illegal in filenames with underscores."""
    illegal = set('<>:"/\\|?*')
    return ''.join('_' if ch in illegal else ch for ch in name)

def get_raw_data_path(school_id, year, school_name):
    """Build the local cache path for one school/year raw JSON dump."""
    stem = "{0}-{1}-{2}.json".format(school_id, year, sanitize_filename(school_name))
    return os.path.join('./raw_pages/special_scores/', stem)

def load_raw_data(file_path):
    """Load previously saved raw JSON data from *file_path*.

    Returns the parsed object, or None when the file is absent or does not
    contain valid JSON.
    """
    try:
        with open(file_path, encoding='utf-8') as fh:
            return json.load(fh)
    except FileNotFoundError:
        return None
    except json.JSONDecodeError:
        return None

def save_raw_data(file_path, data):
    """Write *data* as pretty-printed UTF-8 JSON, creating parent directories."""
    parent = os.path.dirname(file_path)
    os.makedirs(parent, exist_ok=True)
    with open(file_path, 'w', encoding='utf-8') as out:
        out.write(json.dumps(data, ensure_ascii=False, indent=2))

def get_special_score(school_id, year, province_id, school_name):
    """Fetch per-major score data for one school/year/province.

    A locally cached raw response (saved by a previous run) is preferred;
    otherwise the API is queried and the raw response is cached for reruns.

    Returns:
        The ``data`` payload on success (code '0000'), otherwise None.
    """
    raw_file_path = get_raw_data_path(school_id, year, school_name)

    # Serve from the local cache first; cached error responses map to None.
    raw_data = load_raw_data(raw_file_path)
    if raw_data:
        print(f"    从本地文件加载数据: {os.path.basename(raw_file_path)}")
        if raw_data.get('code') == '0000':
            return raw_data.get('data')
        return None

    # Cache miss: fetch from the network.
    url = get_special_score_url(school_id, year, province_id)
    try:
        # Without a timeout a stalled connection would hang the crawl forever.
        response = requests.get(url, timeout=30)
        response.encoding = 'utf-8'
        data = response.json()

        # Persist the raw response (success or error) so reruns skip the
        # network round-trip.
        save_raw_data(raw_file_path, data)
        print(f"    已保存原始数据到: {os.path.basename(raw_file_path)}")

        # .get avoids a spurious KeyError (previously swallowed by the broad
        # except with a misleading "Error fetching" message).
        if data.get('code') == '0000':
            return data.get('data')
        else:
            print(f"API returned error code {data.get('code')} for school {school_id} year {year}")
            return None
    except json.JSONDecodeError:
        print(f"Failed to decode JSON for school {school_id} year {year}")
        return None
    except Exception as e:
        # Best-effort crawler: log and move on rather than abort the run.
        print(f"Error fetching data for school {school_id} year {year}: {str(e)}")
        return None

def check_if_crawled(school_id, year, school_name):
    """Return True when the raw data for this school/year is already on disk."""
    return os.path.exists(get_raw_data_path(school_id, year, school_name))

def main():
    """Entry point: crawl special-score data for every school for the
    Guangxi province, years 2018-2024, caching raw API responses locally."""
    # Ensure the output directory exists before any file is written.
    os.makedirs('./raw_pages/special_scores/', exist_ok=True)

    schools = get_all_schools()
    if not schools:
        print("Failed to get school list")
        return

    total_schools = len(schools)
    processed_schools = 0
    
    # NOTE(review): iterating .items() assumes the API's "data" payload is a
    # dict keyed by school id — confirm against the list_v2.json response.
    for school_id, school_info in schools.items():
        processed_schools += 1
        school_name = school_info.get('name', 'Unknown')
        print(f"处理学校 {processed_schools}/{total_schools}: {school_name} (ID: {school_id})")
        
        # range(2018, 2025) covers admission years 2018 through 2024 inclusive.
        for year in range(2018, 2025):
            if check_if_crawled(school_id, year, school_name):
                print(f"  {year} 年数据已存在，跳过...")
                continue
            
            print(f"  爬取 {year} 年数据...")
            special_scores = get_special_score(school_id, year, GUANGXI_ID, school_name)
            
            if special_scores:
                print(f"    成功获取 {year} 年数据")
            else:
                print(f"    {year} 年无数据")
            
            # Brief delay between requests to avoid hitting the API too fast.
            time.sleep(0.1)

    print("数据爬取完成！")
    print("请运行 parser.py 来解析数据并生成CSV文件")

# Run the crawler only when executed as a script (not on import).
if __name__ == '__main__':
    main()

