import datetime

from bs4 import BeautifulSoup
from loguru import logger

from com.arcfox.school.processor.base_processor import BaseProcessor
from com.arcfox.util.date import now_str
from com.arcfox.util.util import get_md5
from com.arcfox.middleware import async_mysql_middleware as db


class SunEnrollmentPlanProcessor(BaseProcessor):
    """Scrapes enrollment-plan pages from the "sungaokao" site.

    Parses list pages into (university name, url) entries, extracts the
    pagination count and detail-page links, and persists each detail
    article into the ``tb_university_enroll_info`` table (deduplicated by
    ``university_uid`` + ``title``).
    """

    def __init__(self):
        # NOTE(review): deliberately does not call super().__init__() —
        # confirm BaseProcessor's initializer is safe to skip.
        pass

    @staticmethod
    def _clean_text(text):
        """Remove newline/CR/tab characters from scraped text and strip edges."""
        return str(text).replace("\n", "").replace("\r", "").replace("\t", "").strip()

    def parse_page_list(self, html):
        """Parse a list page into ``[{"university_name": ..., "url": ...}, ...]``.

        Cells without an anchor are skipped (and logged); anchors styled
        ``color:gray`` are skipped as well — presumably grayed-out entries
        have no published plan (TODO confirm against the live site).
        """
        soup = BeautifulSoup(html, "lxml")
        # Fixed: the attrs argument must be a dict; the original code passed
        # the set literal {"class", "width1000"}, which is not a mapping and
        # does not filter on the "width1000" class as intended.
        container = soup.find("div", {"class": "width1000"})
        # The last table inside the container holds the university links.
        table_tag = container.find_all("table")[-1]
        page_list = []
        for tr_tag in table_tag.find_all("tr"):
            td_tags = tr_tag.find_all("td")
            if not td_tags:
                continue
            for td_tag in td_tags:
                a_tag = td_tag.find("a")
                if a_tag is None:
                    logger.info("{}:a_tag is None", td_tag.text)
                    continue
                if a_tag.get("style") != 'color:gray':
                    page_list.append({
                        "university_name": self._clean_text(a_tag.text),
                        "url": a_tag.get("href"),
                    })
        return page_list

    def parse_page_count(self, html):
        """Return the total page count parsed from the pagination form.

        The second-to-last <li> inside form#PageForm carries the last page
        number — TODO confirm this holds for single-page result sets.
        """
        soup = BeautifulSoup(html, "lxml")
        last_li = soup.find("form", {"id": "PageForm"}).find_all("li")[-2]
        logger.info(last_li.text)
        return int(last_li.text)

    def parse_url(self, html):
        """Return the detail-page href found in div.zszcdel, or None if absent."""
        soup = BeautifulSoup(html, "lxml")
        div_tag = soup.find("div", {"class": "zszcdel"})
        a_tag = div_tag.find("a")
        if a_tag is None:
            logger.info("{}a_tag is None", div_tag)
            return None
        return a_tag.get("href")

    async def parse_and_save(self, html, url, university_name):
        """Parse a detail page and insert it into tb_university_enroll_info.

        Skips insertion when a row with the same university_uid + title
        already exists. ``publish_time`` is the crawl time, not the article's
        original publication date.
        """
        university_uid = self.generate_school_uid(university_name)
        # MD5 of the source URL serves as a stable external id.
        enroll_sign = get_md5(url)
        soup = BeautifulSoup(html, "lxml")

        div_title = soup.find("div", {"class": "width1000"})
        title = self._clean_text(div_title.find("h2").text)
        content = soup.find("div", {"class": "content"}).text

        mapping = db.MysqlMapping("tb_university_enroll_info")

        # Dedupe on (university_uid, title) before inserting.
        exist = await mapping.query(
            {
                "university_uid": university_uid,
                "title": title,
            }, ["id"])
        if exist:
            logger.info("已经存在,university_name:{},title:{}", university_name, title)
            return
        item = {
            "university_uid": university_uid,
            "university_name": university_name,
            "content": content,
            "title": title,
            "url": url,
            "enroll_info_id": enroll_sign,
            "publish_time": now_str(),
            "data_source": "sungaokao"
        }
        await mapping.insert(item)
