# Page analyzer: extracts keyword-adjacent content from HTML pages
# by lw 2023-11-7

import requests
from bs4 import BeautifulSoup
from lxml import etree
import re


class HTMLAnalyzer:
    """Load an HTML page and extract keyword-adjacent text via XPath or BeautifulSoup.

    Typical use: call load_page()/load_html() once, then query with
    xextract_keyword_content() or bs4extract_keyword_content().
    """

    def __init__(self):
        # Both trees are populated by load_page()/load_html(); they stay
        # None until one of those succeeds.
        self.soup = None   # BeautifulSoup document tree
        self.tree = None   # lxml etree document tree

    def load_page(self, url: str) -> bool:
        """
        Fetch the page at *url* and initialise the BeautifulSoup and etree trees.

        Args:
            url (str): Page URL.

        Returns:
            bool: True on success, False on any network/HTTP/parse failure.
        """
        try:
            # Timeout prevents the request from hanging forever on a dead
            # host; the original call had none.
            response = requests.get(url, timeout=15)
            # Treat HTTP error codes (4xx/5xx) as a failed load instead of
            # silently parsing an error page.
            response.raise_for_status()
            html_content = response.content
            self.soup = BeautifulSoup(html_content, 'html.parser')
            self.tree = etree.HTML(html_content)
            return True
        except Exception as e:
            print("Failed to load page:", e)
            return False

    def load_html(self, html_content: str) -> None:
        """
        Initialise the BeautifulSoup and etree trees from an HTML string.

        Args:
            html_content (str): Raw HTML content.
        """
        self.soup = BeautifulSoup(html_content, 'html.parser')
        self.tree = etree.HTML(html_content)

    def xextract_keyword_content(self, keyword: str, path: str,
                                 reg: str) -> str:
        """
        Extract keyword content using XPath.

        Args:
            keyword (str): Keyword the text node must contain.
            path (str): XPath of the container element to search under.
            reg (str): Regular expression with one capture group.

        Returns:
            str: Group 1 of the first regex match in the first matching
                text node, or "" when nothing matches or no page is loaded.
        """
        # Guard: calling before load_page()/load_html() used to raise
        # AttributeError on None.
        if self.tree is None:
            return ""
        # NOTE(review): keyword is spliced into the XPath string literal; a
        # keyword containing a double quote would break the expression.
        nodes = self.tree.xpath(path + '//text()[contains(., "' + keyword +
                                '")]')
        if nodes:
            match = re.search(reg, nodes[0])
            if match:
                return match.group(1)
        return ""

    def __locate_node_by_id_(self, parent_id: str, keyword: str) -> list:
        """
        Find text nodes containing *keyword* under the element with id *parent_id*.

        Args:
            parent_id (str): id attribute of the parent container.
            keyword (str): Keyword (used as a regex) the text must contain.

        Returns:
            list: All matching text nodes ([] if the parent is absent).
        """
        return self.__collect_matching_text(self.soup.find(id=parent_id),
                                            keyword)

    def __locate_node_by_tag_(self, parent_tag: str, keyword: str) -> list:
        """
        Find text nodes containing *keyword* under the first *parent_tag* element.

        Args:
            parent_tag (str): Tag name of the parent container.
            keyword (str): Keyword (used as a regex) the text must contain.

        Returns:
            list: All matching text nodes ([] if the parent is absent).
        """
        return self.__collect_matching_text(self.soup.find(parent_tag),
                                            keyword)

    def __locate_node_by_class_(self, parent_class: str, keyword: str) -> list:
        """
        Find text nodes containing *keyword* under the first element with *parent_class*.

        Args:
            parent_class (str): CSS class of the parent container.
            keyword (str): Keyword (used as a regex) the text must contain.

        Returns:
            list: All matching text nodes ([] if the parent is absent).
        """
        return self.__collect_matching_text(self.soup.find(class_=parent_class),
                                            keyword)

    @staticmethod
    def __collect_matching_text(parent, keyword: str) -> list:
        """Shared helper: all text descendants of *parent* matching *keyword* regex."""
        if not parent:
            return []
        return parent.find_all(string=re.compile(keyword))

    def bs4extract_keyword_content(self,
                                   keyword: str,
                                   reg: str,
                                   parent_tag: str = None,
                                   parent_id: str = None,
                                   parent_class: str = None) -> str:
        """
        Extract keyword content using BeautifulSoup.

        Exactly one locator is applied, with precedence tag > id > class
        (matching the original nested-if behaviour).

        Args:
            keyword (str): Keyword the text node must contain.
            reg (str): Regular expression with one capture group.
            parent_tag (str, optional): Parent container tag name. Defaults to None.
            parent_id (str, optional): Parent container id. Defaults to None.
            parent_class (str, optional): Parent container CSS class. Defaults to None.

        Returns:
            str: Group 1 of the first regex match across the located nodes,
                or "" when nothing matches.
        """
        if parent_tag:
            nodes = self.__locate_node_by_tag_(parent_tag, keyword)
        elif parent_id:
            nodes = self.__locate_node_by_id_(parent_id, keyword)
        elif parent_class:
            nodes = self.__locate_node_by_class_(parent_class, keyword)
        else:
            # No locator given: nothing to search.
            return ""
        for node in nodes:
            match = re.search(reg, node)
            if match:
                return match.group(1)
        return ""


# Test driver: exercises both the XPath and the BeautifulSoup interfaces
# against a live procurement-notice page.
if __name__ == '__main__':
    analyzer = HTMLAnalyzer()
    # Offline HTML fixture; switch to analyzer.load_html(html_content)
    # below to test without network access.
    html_content = '''
    <body>
        <div class="clearfix" id="newsText">
            <div>
                <div class="protect" id="noticeArea">
                    <meta http-equiv="content-type" content="text/html;charset=utf-8">
                    <p data-v-6b13d514=""></p>
                    <h4><strong>一、项目编号：N5100012022001470</strong>asdfadsaf</h4>
                    <h4><strong>二、项目名称：西南区域环境空气质量预测预报中心能力提升项目</strong></h4>
                </div>
            </div>
        </div>
    </body>
    '''
    # analyzer.load_html(html_content)
    # Fix: check load_page's result. If the fetch fails, self.tree stays
    # None and every extraction call below would raise AttributeError.
    loaded = analyzer.load_page(
        'https://ggzyjy.sc.gov.cn/cyzdxm/019002/019002002/20221110/8a69ce88845d0ce301846087352e087eCY.html'
        # 'https://ggzyjy.sc.gov.cn/cyzdxm/019002/019002002/20221221/1181671806399197184_1.html'
        # 'https://ggzyjy.sc.gov.cn/jyxx/002002/002002003/20221116/8a69c5e5845d0d1401847ebcb1977ae1.html'
    )
    if not loaded:
        print("Failed to load page")
    else:
        print('测试xpath接口：')
        project_number_xpath = analyzer.xextract_keyword_content(
            "项目编号", '//div[@class="protect" and @id="noticeArea"]',
            r'项目编号：([A-Za-z0-9]+)')
        print("project_number:", project_number_xpath)

        project_name_xpath = analyzer.xextract_keyword_content(
            "项目名称", '//div[@class="protect" and @id="noticeArea"]',
            r'项目名称：(.+)')
        print("project_name:", project_name_xpath)

        print('测试bs4接口：')
        project_number_bs4 = analyzer.bs4extract_keyword_content(
            "项目编号", r'项目编号：([A-Za-z0-9]+)', parent_tag='h4')
        print("get project_number by tag:", project_number_bs4)
        project_number_bs4 = analyzer.bs4extract_keyword_content(
            "项目编号", r'项目编号：([A-Za-z0-9]+)', parent_class='protect')
        print("get project_number by class:", project_number_bs4)
        project_number_bs4 = analyzer.bs4extract_keyword_content(
            "项目编号", r'项目编号：([A-Za-z0-9]+)', parent_id='noticeArea')
        print("get project_number by id:", project_number_bs4)

        project_name_bs4 = analyzer.bs4extract_keyword_content(
            "项目名称", r'项目名称：(.+)', parent_tag='div')
        print("get project_name by tag:", project_name_bs4)
        project_name_bs4 = analyzer.bs4extract_keyword_content(
            "项目名称", r'项目名称：(.+)', parent_class='protect')
        print("get project_name by class:", project_name_bs4)
        project_name_bs4 = analyzer.bs4extract_keyword_content(
            "项目名称", r'项目名称：(.+)', parent_id='noticeArea')
        print("get project_name by id:", project_name_bs4)
