from typing import Optional, List

from bs4 import BeautifulSoup, Tag, ResultSet
from lxml import etree
import htmlmin


# HTML parsing facade.
# Built primarily on BeautifulSoup, with lxml's etree alongside it so that
# XPath queries are also supported. BeautifulSoup is powerful and concise;
# pairing it with etree adds XPath handling and leaves room to extend.
# If a better HTML-parsing module appears later, it can be integrated here.
class ParseMain:
    def __init__(self, html: str):
        """
        Initialize the parser with raw HTML.

        Args:
            html (str): HTML content to parse.
        """
        # BeautifulSoup view of the document (lxml backend).
        self.soup = BeautifulSoup(html, 'lxml')
        # NOTE: the original code called self.soup.prettify() here and
        # discarded the result — prettify() returns a string and does not
        # mutate the soup, so that call was a no-op and has been removed.

        # lxml etree view of the same document, used for XPath queries.
        # NOTE(review): etree.HTML returns None for empty/whitespace-only
        # input — confirm callers never pass an empty string.
        self.etree_html = etree.HTML(html)

    # Minify HTML; output is a plain string.
    @staticmethod
    def minify_html(html: str) -> str:
        """
        Minify the given HTML source.

        Declared as a @staticmethod because it reads no instance state;
        it is still callable on instances, so existing callers are unaffected.

        Args:
            html (str): Raw HTML source.

        Returns:
            str: Minified HTML with empty whitespace removed.
        """
        return htmlmin.minify(html, remove_empty_space=True)

    # Render the parsed HTML.
    def to_string(self) -> str:
        """
        Render the parsed document as a pretty-printed string.

        Returns:
            str: Pretty-printed HTML.
        """
        return self.soup.prettify()

    # Return a list of elements matching a CSS selector.
    def find_by_selector(self, selector: str) -> List[Tag]:
        """
        Find elements using a CSS selector.

        Args:
            selector (str): CSS selector string.

        Returns:
            List[Tag]: Elements matching the selector (a BeautifulSoup
            ResultSet, which is a list subclass), or an empty list when the
            selector is invalid.  (Annotation fixed: the original claimed
            ``ResultSet`` yet returned ``[]`` on error.)
        """
        try:
            return self.soup.select(selector)
        except Exception as e:
            print(f"选择器 '{selector}' 无效。发生错误：{e}")
            return []

    # Get a specific attribute of an element.
    def get_by_attr(self, element: Tag, attr: str) -> str:
        """
        Get the value of a specific attribute on an element.

        Args:
            element (Tag): BeautifulSoup Tag object.
            attr (str): Attribute name to read.

        Returns:
            str: The attribute value, or '' when the attribute is absent.
                 NOTE(review): for multi-valued attributes (e.g. ``class``)
                 BeautifulSoup stores a list, not a str — confirm callers
                 only pass single-valued attributes here.
        """
        try:
            return element.attrs[attr]
        except KeyError:
            print(f"属性 '{attr}' 不存在于元素中。返回空字符串。")
            return ''
        except Exception as e:
            print(f"发生错误：{e}")
            return ''

    # Get the text content of a tag.
    def get_innertext(self, element: Tag) -> str:
        """
        Return the element's text content, with descendants' text joined by
        a single space and surrounding whitespace stripped.

        Note: get_text() and .string behave differently — when the element
        contains child tags in addition to text, get_text() concatenates all
        descendant text (including '\\n'), whereas .string returns None.
        """
        return element.get_text(' ', strip=True)

    # Return the text content of every direct child node.
    def get_child_innertext(self, element: Tag) -> List[str]:
        """
        Get the inner text of each direct child of *element*, skipping
        bare newline text nodes.

        Args:
            element (Tag): BeautifulSoup Tag object.

        Returns:
            List[str]: Inner text of each child element.  (Docstring fixed:
            the original claimed a generator, but a list is built.)
        """
        return [self.get_innertext(child) for child in element.children if child != '\n']

    # Get every element whose id equals element_id.
    def find_all_by_id(self, element_id: str) -> List[Tag]:
        """
        Find all elements with the specified ID.

        Args:
            element_id (str): The ID attribute value to search for.

        Returns:
            List[Tag]: Tag objects carrying the given ID, or an empty list
            on error.
        """
        try:
            return self.soup.find_all(id=element_id)
        except Exception as e:
            # Handle exceptions (e.g., BeautifulSoup errors) here
            print(f"Error: {e}")
            return []

    # Get every element whose class equals class_name.
    def find_all_by_class(self, class_name: str) -> List[Tag]:
        """
        Find all elements with the specified CSS class.

        Args:
            class_name (str): The CSS class name to search for.

        Returns:
            List[Tag]: Tag objects carrying the given class, or an empty
            list on error.
        """
        try:
            return self.soup.find_all(class_=class_name)
        except Exception as e:
            # Handle exceptions (e.g., BeautifulSoup errors) here
            print(f"Error: {e}")
            return []

    # Get elements by XPath.
    def find_by_xpath(self, xpath_expression: str) -> List[etree._Element]:
        """
        Find elements using an XPath expression (via the lxml tree).

        Args:
            xpath_expression (str): XPath expression to evaluate.

        Returns:
            List[etree._Element]: lxml elements matching the expression, or
            an empty list on error (e.g. an invalid expression).
        """
        try:
            return self.etree_html.xpath(xpath_expression)
        except Exception as e:
            # Handle exceptions (e.g., invalid XPath expression, parsing errors) here
            print(f"Error: {e}")
            return []

    def find(self, name: str, attrs: Optional[dict] = None) -> Optional[Tag]:
        """
        Find the first element with the given tag name and optional attributes.

        Args:
            name (str): The tag name to search for.
            attrs (dict, optional): Attribute name/value filters.
                Defaults to None (match by name only).

        Returns:
            Optional[Tag]: The first matching element, or None when nothing
            matches or an error occurs.
        """
        try:
            if attrs is None:
                return self.soup.find(name)
            return self.soup.find(name, attrs=attrs)
        except Exception as e:
            # Handle exceptions (e.g., invalid input, BeautifulSoup errors) here
            print(f"Error: {e}")
            return None

    def find_all(self, name: str, attrs: Optional[dict] = None) -> List[Tag]:
        """
        Find all elements with the given tag name and optional attributes.

        Args:
            name (str): The tag name to search for.
            attrs (dict, optional): Attribute name/value filters.
                Defaults to None (match by name only).

        Returns:
            List[Tag]: Matching elements, or an empty list on error.
            (Annotation fixed: find_all yields Tag objects, not
            BeautifulSoup documents.)
        """
        try:
            if attrs is None:
                return self.soup.find_all(name)
            return self.soup.find_all(name, attrs=attrs)
        except Exception as e:
            # Handle exceptions (e.g., invalid input, BeautifulSoup errors) here
            print(f"Error: {e}")
            return []


if __name__ == '__main__':
    # Quick smoke test: parse a small fragment and list every story paragraph.
    sample = """
    <html>
        <head>
            <title>The Dormouse's story</title>
        </head>
        <body>
            <p class="story" name="dromouse">
                Once upon a time there were three little sisters; and their names were
                <a href="http://example.com/elsie" class="sister" id="link1">
                    <span>我的吗<span>1</span><span>2</span></span>
                </a>
                <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a>
                and
                <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>
                and they lived at the bottom of a well.
            </p>
            <p class="story">...</p>
    """
    parser = ParseMain(sample)
    story_paragraphs = parser.find_all('p', {'class': 'story'})
    print(story_paragraphs)
    print(type(story_paragraphs))