import os

from loguru import logger
from urllib import parse
from com.arcfox.school.processor.base_processor import BaseProcessor
from bs4 import BeautifulSoup
from com.arcfox.manager.redis_task_manager import RedisTaskManager
from com.arcfox.util.redis_key_manager import UNIVERSITY_URLS_KEY, UNIVERSITY_SUB_URLS_KEY
from com.arcfox.util import util


class UniversityImgProcessor(BaseProcessor):
    """Crawls university web pages: extracts same-site sub-page links and
    saves ``.jpg`` images into a per-university directory.

    Tasks are dicts with at least ``url`` and ``uid`` keys; discovered
    sub-page work is queued through :class:`RedisTaskManager`.
    """

    def __init__(self):
        # NOTE(review): BaseProcessor.__init__ is not invoked here —
        # confirm the base class needs no initialisation.
        pass

    async def parse_and_save_url(self, task, html):
        """Parse ``html`` for same-site sub-page links and queue them.

        If no links are found, the task is recorded as failed and nothing
        is queued.

        :param task: dict with ``url`` and ``uid``.
        :param html: raw HTML of the page at ``task["url"]``.
        """
        soup = BeautifulSoup(html, "lxml")
        url_list = self.parse_all_url(task, soup)
        if not url_list:
            await RedisTaskManager(UNIVERSITY_URLS_KEY).add_fail_tasks(task)
            return  # nothing to enqueue — avoid a pointless add_tasks([]) call
        await RedisTaskManager(UNIVERSITY_SUB_URLS_KEY).add_tasks(url_list)

    def parse_all_url(self, task, soup):
        """Collect all second-level page URLs (same host, .htm/.html).

        :param task: dict with ``url`` (page being parsed) and ``uid``.
        :param soup: parsed BeautifulSoup document of that page.
        :return: list of ``{"url": <absolute url>, "uid": <task uid>}`` dicts.
        """
        parsed_url = parse.urlparse(task["url"])
        scheme = parsed_url.scheme
        hostname = parsed_url.netloc
        url_list = []
        for a_tag in soup.find_all("a"):
            if not a_tag.has_attr('href'):
                continue
            a_url = a_tag.attrs['href']
            parsed_sub_url = parse.urlparse(a_url)
            if self.filter_url(hostname, parsed_sub_url):
                full_url = self.__complete_full_url(a_url, scheme, hostname)
                url_task = {"url": full_url, "uid": task['uid']}
                logger.info(url_task)
                url_list.append(url_task)
        return url_list

    def parse_all_imgs(self, task, html):
        """Extract absolute URLs of all ``.jpg`` images referenced in ``html``.

        :param task: dict with ``url`` (used to absolutize relative srcs).
        :param html: raw HTML to scan for ``<img>`` tags.
        :return: list of absolute jpg URLs.
        """
        parsed_url = parse.urlparse(task["url"])
        hostname = parsed_url.netloc
        scheme = parsed_url.scheme
        soup = BeautifulSoup(html, "lxml")
        img_list = []
        for img_tag in soup.find_all("img"):
            if not img_tag.has_attr("src"):
                continue
            full_url = self.__complete_full_url(img_tag.attrs['src'], scheme, hostname)
            # Keep only jpg files. Fixed: require the ".jpg" extension so
            # names that merely end in the letters "jpg" are not matched.
            if full_url.endswith(".jpg"):
                img_list.append(full_url.replace("../", ""))
        return img_list

    def save_imgs(self, task, img_url, img_buffer, img_count_map):
        """Write ``img_buffer`` to disk under the task's uid and bump its count.

        Only images between 100 KB and 2 MB are kept. Failures are logged
        and swallowed (best-effort download; one bad image must not stop
        the crawl).

        :param task: dict with ``uid`` selecting the target directory.
        :param img_url: source URL; its MD5 becomes the file name.
        :param img_buffer: raw image bytes.
        :param img_count_map: per-uid saved-image counter, mutated in place.
        """
        try:
            # Fixed: the original divided by 1000 (decimal KB) but compared
            # the upper bound against 2048 (binary KB) — use 1024 consistently.
            size_kb = len(img_buffer) / 1024
            if size_kb < 100 or size_kb > 2048:
                return
            dir_name = f"D:/university/{task['uid']}"
            util.make_if_not_exists(dir_name)
            file_name = f"{util.get_md5(img_url)}.jpg"
            with open(f"{dir_name}/{file_name}", "wb") as f:
                f.write(img_buffer)
            logger.info(f"{img_url} saved!")
            img_count_map[task['uid']] = img_count_map.get(task['uid'], 0) + 1
        except Exception:
            # Fixed: was a silent `pass`; keep best-effort behavior but
            # record the failure so disk/permission problems are visible.
            logger.exception(f"failed to save {img_url}")

    def filter_url(self, hostname, parsed_sub_url):
        """Return True if the parsed link is a same-site .htm/.html page.

        A link counts as same-site when it has no host of its own
        (relative URL) or when its host contains the page's host with the
        "www." prefix and slashes stripped.

        :param hostname: netloc of the page the link was found on.
        :param parsed_sub_url: ``urllib.parse`` result for the candidate link.
        """
        a_hostname = parsed_sub_url.netloc
        sub_hostname = util.multi_replace(hostname, ["www.", "/"], "")
        same_site = not a_hostname or sub_hostname in a_hostname
        is_page = parsed_sub_url.path.endswith(("htm", "html"))
        return same_site and is_page

    def __complete_full_url(self, origin_url, scheme, hostname):
        """Return an absolute URL; prefix scheme/host when ``origin_url`` is relative."""
        parsed_sub_url = parse.urlparse(origin_url)
        if parsed_sub_url.netloc:
            # Already absolute — return unchanged.
            return origin_url
        fixed_url = origin_url if origin_url.startswith("/") else f"/{origin_url}"
        # Bug fix: str.replace returns a new string; the original discarded
        # the result, so ".." segments were never actually removed.
        fixed_url = fixed_url.replace("..", "")
        return f"{scheme}://{hostname}{fixed_url}"
