import asyncio

from loguru import logger

from com.arcfox.base.base_spider import BaseSpider, ExecuteType
from com.arcfox.middleware.async_redis_middleware import open_redis
from com.arcfox.school.processor.sun.sun_enrollment_plan_processor import SunEnrollmentPlanProcessor
from com.arcfox.util import async_request as requests


class SunEnrollmentPlanSpider(BaseSpider):
    """Spider for the chsi.com.cn verified enrollment-charter ("阳光高考") listing.

    Runs once: walks the offset-paginated listing, resolves each university's
    charter detail page, and hands the final page to the processor for parsing
    and persistence.
    """

    def __init__(self):
        # First listing page: verified charters (lb-1), offset 0.
        self.url = ("https://gaokao.chsi.com.cn/zsgs/zhangcheng/"
                    "listVerifedZszc--method-index,lb-1,start-0.dhtml")
        self.processor = SunEnrollmentPlanProcessor()

    @open_redis
    async def init_data_version(self, client):
        """Delegate data-version bookkeeping to the base class within a redis session."""
        return await super().init_data_version(client)

    async def _pull_task(self):
        # One-shot spider: no task payload, execute a single time.
        return (None, ExecuteType.ONCE)

    async def _crawl_by_task(self, task):
        """Fetch all listing pages, then every charter detail page, and persist each."""
        first = await requests.get_with_session(self.url, headers=self.get_default_header(), timeout=5)
        if first.code != 200:
            return

        total_pages = self.processor.parse_page_count(first.response)
        logger.info("parse_page_count: {}".format(total_pages))
        entries = self.processor.parse_page_list(first.response)

        # Remaining listing pages are offset-paginated in steps of 100.
        # NOTE(review): range(1, total_pages + 1) requests one offset beyond
        # page `total_pages` if parse_page_count returns the total page count
        # (page 1 was already fetched above) — confirm against the processor.
        for page_no in range(1, total_pages + 1):
            offset = page_no * 100
            listing_url = ("https://gaokao.chsi.com.cn/zsgs/zhangcheng/"
                           f"listVerifedZszc--method-index,lb-1,start-{offset}.dhtml")
            listing = await requests.get_with_session(listing_url, headers=self.get_default_header(), timeout=5)
            if listing.code == 200:
                entries.extend(self.processor.parse_page_list(listing.response))

        logger.info("page_list 数量: {}".format(len(entries)))
        for entry in entries:
            # Intermediate page that links to the real charter URL.
            detail = await requests.get_with_session("https://gaokao.chsi.com.cn" + entry["url"],
                                                     headers=self.get_default_header(), timeout=5)
            if detail.code != 200:
                continue
            charter_path = self.processor.parse_url(detail.response)
            if charter_path is None:
                continue
            charter_url = "https://gaokao.chsi.com.cn" + charter_path
            charter = await requests.get_with_session(charter_url, headers=self.get_default_header(), timeout=5)
            if charter.code == 200:
                await self.processor.parse_and_save(charter.response, charter_url, entry["university_name"])


if __name__ == "__main__":
    # Script entry point: build the spider and drive its crawl loop to completion.
    spider = SunEnrollmentPlanSpider()
    asyncio.run(spider.start_crawl())
