import asyncio

from com.arcfox.base.base_spider import ExecuteType
from com.arcfox.manager.redis_task_manager import RedisTaskManager
from com.arcfox.school.processor.yzy.yzy_province_score_processor import YZYProvinceScoreProcessor
from com.arcfox.school.processor.yzy.yzy_university_enroll_processor import YZYUniversityEnrollProcessor
from com.arcfox.school.processor.yzy.yzy_university_score_processor import YZYUniversityScoreProcessor
from com.arcfox.school.spider.yzy.yzy_base_spider import YZYBaseSpider
from com.arcfox.util import async_request as requests
from com.arcfox.util.redis_key_manager import YZY_UNIVERSITY_SCHOOL_LIST_KEY, YZY_UNIVERSITY_PROVINCE_SCHOOL_LIST_KEY, \
    YZY_UNIVERSITY_ENROLL_LIST_KEY
from loguru import logger


class YZYUniversityEnrollInfoSpider(YZYBaseSpider):
    """
    Crawl per-university enrollment-news articles from youzy.cn (优志愿).

    Workflow: pull university tasks from Redis, page through each
    university's enrollment-news list endpoint, fetch every article body,
    and hand both the list items and the article content to
    YZYUniversityEnrollProcessor for parsing and persistence.

    NOTE(review): the previous docstring described a school-image/code
    spider (tb_university_info) and appeared copy-pasted from a sibling
    spider; replaced with a description of what this class actually does.
    """

    # Consecutive non-200 list responses tolerated before a university is
    # skipped (the previous version retried forever in a tight loop).
    MAX_LIST_RETRIES = 3

    def __init__(self):
        super().__init__()
        # Paged list endpoint: enrollment news for a single college code.
        self.url = "https://uwf7de983aad7a717eb.youzy.cn/youzy.dms.basiclib.api.college.news.bykeywords.search"
        # Current list page; always reset to 1 between universities.
        self.start_page = 1
        self.processor = YZYUniversityEnrollProcessor()
        self.task_manager = RedisTaskManager(YZY_UNIVERSITY_ENROLL_LIST_KEY)

    def get_params(self, unversity_id):
        """
        Build the raw JSON request body for the list endpoint.

        Kept as a hand-built string (not json.dumps) because the u-sign
        header is computed over these exact bytes.
        NOTE: the parameter keeps its historical 'unversity' spelling so
        any keyword-argument callers elsewhere do not break.
        """
        return '{"collegeCode":"' + str(unversity_id) + '","pageIndex":' + str(self.start_page) + ',"keywords":""}'

    def get_content_params(self, id):
        """Build the request params for fetching one article body by its id."""
        # NOTE: 'id' shadows the builtin but is kept for interface stability.
        return {
            'id': id,
            'isAddHits': 'true',
        }

    def get_headers(self, params):
        """Headers for the list request; u-sign is derived from the raw JSON body."""
        return {
            "Host": "uwf7de983aad7a717eb.youzy.cn",
            "u-sign": self.get_u_sgin(params),
            "Content-Type": "application/json",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36",
            "Referer": "https://pv4y-pc.youzy.cn/",
        }

    def get_content_headers(self, params):
        """Headers for the article-content request; u-sign is computed over the
        query-string rendering of the params dict."""
        return {
            "Host": "uwf7de983aad7a717eb.youzy.cn",
            "u-sign": self.get_u_sgin(f"id={params['id']}&isAddHits={params['isAddHits']}"),
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36",
            "Referer": "https://pv4y-pc.youzy.cn/",
        }

    async def _pull_task(self):
        """Pull up to 10 university tasks from Redis; signal FINISH when drained."""
        return await self.task_manager.pull_tasks(10), ExecuteType.FINISH

    async def _crawl_by_task(self, tasks):
        """
        For each task, page through the news list and fetch/save each article.

        Fixes vs. the previous version:
        - ``self.start_page`` is reset in a ``finally`` block, so an
          exception mid-pagination no longer leaves the NEXT university
          starting from a stale page number.
        - A non-200 list response is retried a bounded number of times with
          a short sleep, instead of looping forever with no backoff.
        """
        await self.init_session()
        for task in tasks:
            fail_count = 0
            try:
                while True:
                    logger.info("正在爬取：{}", task["university_name"])
                    params = self.get_params(task['yzy_code'])
                    resp = await requests.post_with_session(url=self.url, data=params, headers=self.get_headers(params))
                    if resp.code != 200:
                        fail_count += 1
                        if fail_count >= self.MAX_LIST_RETRIES:
                            logger.warning("列表请求连续失败, 跳过: {}", task)
                            break
                        await asyncio.sleep(1)
                        continue
                    fail_count = 0
                    self.start_page += 1
                    result_json = resp.json()
                    result_arr = result_json["result"]["items"]
                    if len(result_arr) == 0:
                        logger.info("没有更多数据了")
                        break
                    parse_result = await self.processor.parse_info_url_list(result_arr, task["university_name"])
                    for item in parse_result:
                        content_paras = self.get_content_params(item["enroll_info_id"])
                        resp_content = await requests.post_with_session(
                            url="https://uwf7de983aad7a717eb.youzy.cn/youzy.dms.basiclib.api.college.news.get",
                            data=content_paras,
                            headers=self.get_content_headers(content_paras))

                        if resp_content.code == 200:
                            await self.processor.parse_content_and_save(item, resp_content.json())
                    # A short page (< 20 parsed items) means this was the last
                    # page, or everything after it is already-seen old data.
                    if len(parse_result) < 20:
                        break

            except Exception as e:
                logger.error("{}爬取失败,{}", task, e)
                # await self.task_manager.add_fail_tasks(task)
            finally:
                # Always restart paging from page 1 for the next university.
                self.start_page = 1


if __name__ == "__main__":
    # Script entry point: build the spider and drive it to completion.
    spider = YZYUniversityEnrollInfoSpider()
    asyncio.run(spider.start_crawl())
