import re

from loguru import logger

from com.arcfox.school.processor.base_processor import BaseProcessor
from com.arcfox.middleware import async_mysql_middleware as db
from bs4 import BeautifulSoup


class XiaoYouSchoolProcessor(BaseProcessor):
    """Processor for 校友会 (Chinese University Alumni Association) rankings.

    Parses the ranking tables embedded in a result page for the years
    2019-2022 and persists each row into ``tb_university_ranking``,
    skipping rows that already exist for the same
    (university, year, source) combination.
    """

    def __init__(self):
        self.SOURCE = "校友会"
        # Year -> parser method. Each year's page lays the tables out
        # differently, so each parser selects a different slice of the
        # table list (slice bounds determined from the actual pages).
        self.parser_mapping = {
            "2022": self._parse_2022, "2021": self._parse_2021,
            "2020": self._parse_2020, "2019": self._parse_2019
        }

    async def parse_and_save_data(self, task, response):
        """Parse the ranking page for ``task['year']`` and insert new rows.

        :param task: dict with at least a ``'year'`` key ("2019".."2022").
        :param response: raw HTML of the ranking page.
        :raises KeyError: if ``task['year']`` has no registered parser.
        """
        soup = BeautifulSoup(response, "lxml")
        result = self.parser_mapping[task['year']](soup)
        mapping = db.MysqlMapping("tb_university_ranking")
        for row in result:
            # Deduplicate on (university, year, source) before inserting.
            old_data = await mapping.query(
                {"university_uid": row["university_uid"], "ranking_year": task['year'], "ranking_source": self.SOURCE},
                ["id"]
            )
            if old_data:
                logger.info("数据已存在!")  # row already present — skip
                continue
            await mapping.insert(row)

    def _ranking_tables(self, soup):
        """Return every <table> inside the page's main text <div>."""
        return soup.find("div", {"class": "text"}).find_all("table")

    def _parse_2022(self, soup):
        # 2022 page: only the last table holds the ranking.
        return self.__format_data("2022", self._ranking_tables(soup)[-1:])

    def _parse_2021(self, soup):
        # 2021 page: five ranking tables, excluding the trailing one.
        return self.__format_data("2021", self._ranking_tables(soup)[-6: -1])

    def _parse_2020(self, soup):
        # 2020 page: six ranking tables, excluding the trailing one.
        return self.__format_data("2020", self._ranking_tables(soup)[-7: -1])

    def _parse_2019(self, soup):
        # 2019 page: the last eight tables hold the ranking.
        return self.__format_data("2019", self._ranking_tables(soup)[-8:])

    def __format_data(self, year, tables=()):
        """Flatten ranking ``tables`` into a list of row dicts for insertion.

        Each data row is expected to carry five <p> cells:
        rank, university name, score, star rating, tags.
        BUGFIX: the default was a shared mutable list (``tables=[]``);
        replaced with an immutable empty tuple (same falsy/iterable behavior).
        """
        data_list = []
        for table in tables:
            # Skip the header row of each table.
            for row in table.find_all("tr")[1:]:
                p_tags = row.find_all("p")
                # Guard against malformed rows missing the expected 5 cells
                # (the original raised IndexError here).
                if len(p_tags) < 5:
                    continue
                ranking_sort = p_tags[0].text
                # Skip rows whose rank cell is empty or whitespace-only;
                # str.strip() also removes full-width spaces (\u3000).
                if not ranking_sort.strip():
                    continue
                university_name = p_tags[1].text
                data_list.append({
                    "university_uid": self.generate_school_uid(university_name),
                    "university_name": university_name,
                    "ranking_sort": ranking_sort,  # .text is already str
                    "ranking_year": year,
                    "ranking_score": p_tags[2].text,
                    "ranking_star": p_tags[3].text[:1],  # first char, e.g. "8" of "8★"
                    "university_tags": p_tags[4].text,
                    "ranking_source": self.SOURCE,
                })
        return data_list
