import re
from typing import Set, Dict, Tuple, Union
from urllib import parse

from bs4 import BeautifulSoup


class HTMLParser:
    """
    HTML parser for Baidu-Baike-style encyclopedia pages.

    Extracts candidate person-name links and, when the page looks like an
    actor's biography, a small dict of page data (title, url, summary,
    basic-info box).
    """
    # Path to the file of Chinese surnames (one per line).
    DIR = "data/lastnames.txt"
    # Keywords whose presence in the summary marks the page as an actor.
    ACTOR_KEYWORDS = [
        "演员",
    ]
    # Keywords whose presence in the basic-info box marks the page as a
    # person.  The \xa0 runs are non-breaking spaces used by the site to
    # pad two-character field labels.
    PERSON_KEYWORDS = [
        "出生地",
        "毕业院校",
        "出生日期",
        "国\xa0\xa0\xa0\xa0籍",
        "民\xa0\xa0\xa0\xa0族",
        "身\xa0\xa0\xa0\xa0高",
        "星\xa0\xa0\xa0\xa0座",
        "血\xa0\xa0\xa0\xa0型",
        "生\xa0\xa0\xa0\xa0肖",
        "性\xa0\xa0\xa0\xa0别",
    ]
    # Maximum length (in characters) for a string to count as a name.
    NAME_LENGTH = 4

    def __init__(self) -> None:
        self._soup = None          # BeautifulSoup tree of the current page
        self._current_url = None   # absolute URL of the current page
        self._lastnames = self._get_lastnames()

    def parse(self, page: str, url: str) -> Union[Tuple[Set[str], Union[Dict[str, str], None]], None]:
        """
        Parse one fetched page.

        :param page: raw HTML of the page.
        :param url: absolute URL the page was fetched from.
        :return: ``(urls, data)`` where *urls* is the set of candidate
            person links and *data* is the extracted page dict (or ``None``
            when the page is not an actor page), or ``None`` when either
            argument is empty.
        """
        if page and url:
            self._soup = BeautifulSoup(page, "html.parser")
            self._current_url = url
            urls = self._get_page_urls()
            data = self._get_page_data()
            return urls, data
        return None

    def _get_page_urls(self) -> Set[str]:
        """
        Collect in-page links whose path segment looks like a person name.

        Only ``/item/<percent-encoded>`` anchors are considered; each is
        decoded, name-checked, and resolved against the current URL.
        """
        anchors = self._soup.find_all(
            'a',
            href=re.compile(r'/item/(\%\w{2})+')
        )
        url_set = set()
        for anchor in anchors:
            rel_path = parse.unquote(anchor["href"])
            if self._is_name(rel_path):
                url_set.add(parse.urljoin(self._current_url, rel_path))
        return url_set

    def _get_page_title(self) -> Union[str, None]:
        """
        Return the page title text, or ``None`` when the element is absent.
        """
        # Guard each find(): missing elements are common on real pages and
        # the original chained calls raised AttributeError.
        container = self._soup.find('dd', class_='lemmaWgt-lemmaTitle-title')
        if container is None:
            return None
        heading = container.find('h1')
        return heading.get_text() if heading is not None else None

    def _get_page_summary(self) -> Union[str, None]:
        """
        Return the page summary text, or ``None`` when the element is absent.
        """
        node = self._soup.find(
            'div',
            attrs={'label-module': 'lemmaSummary'},
            class_='lemma-summary'
        )
        return node.get_text() if node is not None else None

    def _get_page_info(self) -> Union[str, None]:
        """
        Return the basic-info box text, or ``None`` when the element is absent.
        """
        node = self._soup.find("div", class_="basic-info")
        return node.get_text() if node is not None else None

    def _get_page_data(self) -> Union[Dict[str, str], None]:
        """
        Build the page-data dict when the page describes an actor.

        :return: dict with keys ``title``, ``url``, ``summary``,
            ``basic-info``, or ``None`` when the page is not an actor page
            or required elements are missing.
        """
        info = self._get_page_info()
        summary = self._get_page_summary()
        if not info or not summary:
            return None
        if self._is_person(info) and self._is_actor(summary):
            return {
                "title": self._get_page_title(),
                "url": self._current_url,
                "summary": summary,
                "basic-info": info,
            }
        return None

    @staticmethod
    def _get_lastnames() -> Set[str]:
        """
        Load the surname file into a set, skipping blank lines.
        """
        # Explicit encoding: the file is Chinese text and the platform
        # default (e.g. cp936/latin-1) would mis-decode it.
        with open(HTMLParser.DIR, "r", encoding="utf-8") as f:
            return {line.strip() for line in f if line.strip()}

    def _is_name(self, rel_path: str) -> bool:
        """
        Heuristic check that a relative path points at a person name.

        A name is short enough (``NAME_LENGTH``) and starts with a known
        single- or double-character surname.  e.g. ``/item/黄晓明``
        """
        parts = rel_path.split('/')
        if len(parts) < 3:  # guard: path shorter than /item/<name>
            return False
        name = parts[2]
        if len(name) > HTMLParser.NAME_LENGTH:
            return False
        # BUG FIX: the original condition mixed `and`/`or` without
        # parentheses, so a two-character surname match skipped the
        # length check above.
        return name[:1] in self._lastnames or name[:2] in self._lastnames

    @staticmethod
    def _is_person(info) -> bool:
        """
        True when any person keyword appears in the basic-info text.
        """
        return any(key in info for key in HTMLParser.PERSON_KEYWORDS)

    @staticmethod
    def _is_actor(summary) -> bool:
        """
        True when any actor keyword appears in the summary text.
        """
        return any(key in summary for key in HTMLParser.ACTOR_KEYWORDS)
    

