import asyncio

from loguru import logger

from com.arcfox.base.base_spider import ExecuteType
from com.arcfox.school.processor.jzy.jzy_school_list_processor import SchoolListProcessor
from com.arcfox.school.spider.jzy.jzy_base_spider import JZYBaseSpider
from com.arcfox.util import async_request as requests
from com.arcfox.util.util import random_sleep
from com.arcfox.middleware.proxy_zhima_middleware import get_proxy, format_proxy

'''
    聚志愿院校库列表抓取
'''


class SchoolListSpider(JZYBaseSpider):
    """聚志愿 (jzy) school-library list spider.

    Crawls the paginated school list starting from ``/schools/list_1.html``
    and hands every successfully fetched page to ``SchoolListProcessor`` for
    parsing and storage.
    """

    def __init__(self):
        super().__init__()
        # First page of the paginated school list on the jzy host.
        self.start_url = f"{self.host}/schools/list_1.html"
        self.processor = SchoolListProcessor()
        # Items per page; used by the processor to derive the total page count.
        self.page_size = 20

    async def _pull_task(self):
        # One-shot spider: there is no external task queue to pull from.
        return True, ExecuteType.ONCE

    async def _crawl_by_task(self, task):
        """Fetch page 1 (up to 3 attempts, rotating proxy after each failure),
        then walk the remaining pages sequentially.

        :param task: ignored; present to satisfy the base-spider interface.
        """
        await self.init_session()
        # Downstream task keys the processor seeds for each discovered school.
        task_keys = [self.TASK_KEY_SCHOOL_LIST, self.TASK_KEY_SCHOOL_MAJOR_LIST, self.TASK_KEY_MAJOR_SCORE,
                     self.TASK_SCHOOL_ZSJH_LIST_JZY, self.TASK_SCHOOL_PROVINCE_LIST_JZY]
        formatted_proxy = None
        # 重试 3 次 — retry page 1 up to three times.
        for attempt in range(3):
            resp = await requests.get(self.session, url=self.start_url, headers=self.get_default_header(),
                                      proxy=formatted_proxy)
            if resp.code == 200:
                await self.processor.parse_and_store_school_list(resp.response, task_keys)
                total_pages = self.processor.parse_total_pages(resp.response, self.page_size)
                for page in range(2, total_pages + 1):
                    url = f"{self.host}/schools/list_{page}.html"
                    # NOTE(review): inner pages are not retried — a non-200 page
                    # is only logged below and then skipped.
                    resp = await requests.get(self.session, url=url, headers=self.get_default_header(),
                                              proxy=formatted_proxy)
                    if resp.code == 200:
                        await self.processor.parse_and_store_school_list(resp.response, task_keys)
                    logger.info(f"正在抓取的url: {url} code: {resp.code}")
                    await random_sleep(1)
                break
            else:
                # Suspected anti-crawl block: rotate to a fresh proxy before retrying.
                proxy = await get_proxy()
                formatted_proxy = format_proxy(proxy)
                logger.info("命中风控, 更换 IP")
                logger.warning(f"抓取第一页数据失败, 第 {attempt + 1}/3 次尝试")
        else:
            # All retries exhausted without a 200 on page 1. Previously this
            # failed silently; surface it so operators can notice and rerun.
            logger.error("抓取第一页数据失败, 已重试 3 次, 放弃本次抓取")


if __name__ == "__main__":
    # Script entry point: build the spider and drive its crawl loop to completion.
    spider = SchoolListSpider()
    asyncio.run(spider.start_crawl())
