import math
import time
from urllib.parse import quote

import requests
from pandas import DataFrame
from rich.progress import track


class BaiduGkCrawler:
    """
    Crawler for Baidu Gaokao (gaokao.baidu.com) university-ranking data.

    Given an origin province, it pages through the school-list API, collects
    each school's basic attributes plus its score list, and writes everything
    to '<province>高考院校汇总.csv'.
    """

    # Seconds before an HTTP request is abandoned; prevents indefinite hangs
    # on a stalled server (the original code had no timeout at all).
    REQUEST_TIMEOUT = 10
    # Give up on a page after this many failed attempts instead of retrying
    # forever (the original `while True` loop never terminated on a
    # permanently broken page).
    MAX_RETRIES = 3

    def __init__(self, origin_province: str):
        """
        Initialize the crawler and eagerly fetch pagination metadata.

        Args:
            origin_province: Province name (Chinese) used as the
                `userProvince` query filter on the Baidu API.
        """
        self.headers = {
            "user-agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/127.0.0.0 Safari/537.36 Edg/127.0.0.0",
        }
        self.origin_province = origin_province
        self.url = f"https://gaokao.baidu.com/gk/gkschool/list?userProvince={quote(origin_province)}"
        # page_nums/score_list_length are 0 and df is None if the first
        # request fails; run() guards against that state.
        self.page_nums, self.score_list_length, self.df = self._fetch_basic_info()

    def _fetch_basic_info(self):
        """
        Fetch pagination metadata and build the empty result DataFrame.

        Returns:
            Tuple of (total page count, length of each school's score_list,
            empty DataFrame with the output columns), or (0, 0, None) on
            any request/parse failure.
        """
        try:
            response = requests.get(
                self.url, headers=self.headers, timeout=self.REQUEST_TIMEOUT
            )
            response.raise_for_status()
            result = response.json()

            # Probe the first school to learn how many score entries each
            # row carries (1 or 2, depending on the province's exam system).
            score_list_length = len(result["data"]["ranking"]["tRow"][0]["score_list"])

            columns = [
                "院校名称",
                "省份",
                "城市",
                "院校类型",
                "教育层次",
                "开办属性",
                "录取批次",
                "选科/分数/名次 A",
            ]

            if score_list_length > 1:
                columns.append("选科/分数/名次 B")

            df = DataFrame(columns=columns)

            total = result["data"]["pageInfo"]["total"]
            res_num = result["data"]["pageInfo"]["rn"]
            page_nums = math.ceil(total / res_num)

            return page_nums, score_list_length, df

        # IndexError: empty tRow; ValueError: malformed JSON body on some
        # requests versions — both previously escaped the handler.
        except (requests.RequestException, KeyError, IndexError, ValueError) as e:
            print(f"Error fetching basic info: {e}")
            return 0, 0, None

    @staticmethod
    def _extract_value(school: dict, key: str, default: str = "") -> str:
        """
        Return school[key], or *default* if the key is absent.

        Several fields (province, city, ...) are optional in the API
        response, so missing keys must not abort the row.
        """
        return school.get(key, default)

    def _get_school_info(self, result: dict) -> bool:
        """
        Extract every school row from one page's JSON and append to self.df.

        Args:
            result: Parsed JSON response for one page.

        Returns:
            True if all rows were stored, False on a malformed response.
        """
        try:
            schools = result["data"]["ranking"]["tRow"]
            for school in schools:
                row = [
                    school["college_name"],
                    self._extract_value(school, "province"),
                    self._extract_value(school, "city"),
                    self._extract_value(school, "school_type"),
                    self._extract_value(school, "education"),
                    self._extract_value(school, "nature"),
                    school["batch"],
                    school["score_list"][0],
                ]
                if self.score_list_length > 1:
                    row.append(school["score_list"][1])

                self.df.loc[len(self.df)] = row

            return True

        except (KeyError, TypeError, IndexError) as e:
            print(f"Error processing school info: {e}")
            return False

    def _fetch_school_data(self, page_num: int) -> dict:
        """
        Fetch one page of school data.

        Args:
            page_num: 1-based page index.

        Returns:
            Parsed JSON dict, or {} on any request/parse failure (the
            caller treats an empty dict as a retryable failure).
        """
        try:
            response = requests.get(
                f"{self.url}&needFilter=1&pn={page_num}&rn=10",
                headers=self.headers,
                timeout=self.REQUEST_TIMEOUT,
            )
            response.raise_for_status()
            return response.json()

        except (requests.RequestException, ValueError) as e:
            print(f"Error fetching data for page {page_num}: {e}")
            return {}

    def run(self):
        """
        Crawl every page and write the collected rows to
        '<province>高考院校汇总.csv'. Does nothing if initialization failed.
        """
        # _fetch_basic_info may have failed; without this guard the final
        # to_csv call would crash with AttributeError on a None DataFrame.
        if self.df is None or self.page_nums == 0:
            print("Initialization failed; nothing to crawl.")
            return

        for page_num in track(
            range(1, self.page_nums + 1), description=self.origin_province
        ):
            # Bounded retry: a page that keeps failing is skipped instead of
            # blocking the whole crawl forever.
            for _ in range(self.MAX_RETRIES):
                result = self._fetch_school_data(page_num)
                if result and self._get_school_info(result):
                    break
                print(f"Retrying page {page_num}...")
                time.sleep(1)
            else:
                print(f"Giving up on page {page_num} after {self.MAX_RETRIES} attempts.")

        self.df.to_csv(
            f"{self.origin_province}高考院校汇总.csv",
            index=False,
            encoding="utf-8",
        )
