import requests
import re
from bs4 import BeautifulSoup
from lxml import etree

def has_no_punctuation(text):
    """Return True when *text* contains none of the listed CJK/ASCII punctuation marks."""
    # The backslash must be doubled inside the class; '-' sits last so it is a literal hyphen.
    banned = re.compile(r"[，。！？；：、“”‘’（）()<>《》【】\[\]{}\"'`~!@#$%^&*+=|\\/.,:?-]")
    return banned.search(text) is None


def is_application_field(text):
    """Heuristically decide whether *text* is an "application field" label line.

    A match is a short prefix (at most 20 characters, none of them the full
    stop '。') followed by one of the known section keywords and a colon or
    whitespace separator.

    Args:
        text (str): candidate paragraph text.

    Returns:
        bool: True when the text looks like an application-field heading.
    """
    text = text.strip()

    # Fuzzy keyword set (the keyword may appear after a short prefix).
    keywords = [
        '应用领域',
        '应用范围',
        '适用范围',
        '适用行业',
        '适应行业',
        '应用行业',
    ]

    # Bug fix: the negative lookahead must be repeated with the character —
    # (?:(?!。).){0,20} — the original (?:(?!。).{0,20}) checked only the
    # FIRST prefix character, so a '。' later in the prefix slipped through.
    pattern = r'^(?:(?!。).){0,20}(' + '|'.join(keywords) + r')[：:\s]'

    return re.match(pattern, text) is not None

def extract_section(html, section_keywords, stop_keywords):
    """Extract a named section (e.g. "application fields") from product HTML.

    Scans <p>/<h1> elements for a heading that contains one of
    *section_keywords*, then collects following sibling elements' text until
    a *stop_keywords* heading (or a heading-like short line) is reached.

    Args:
        html (str): raw HTML of the product page.
        section_keywords (list[str]): substrings that mark the wanted section.
        stop_keywords (list[str]): substrings that mark the start of the next
            section.

    Returns:
        str: the joined section text ('' when nothing matched), or a single
        heading line when that line already carries the content inline.
    """
    # NOTE(review): etree.HTML returns None for empty/unparsable input, which
    # would make tree.xpath raise AttributeError — confirm callers pass real HTML.
    tree = etree.HTML(html)
    ps = tree.xpath('//p|//h1')

    result = []
    capturing = False

    for p in ps:
        text = ''.join(p.xpath('.//text()')).strip()
        if text.endswith(tuple([':','：'])):
            # Strips ALL colons, not only the trailing one — presumably fine
            # for heading lines; verify if headings may embed inner colons.
            text = text.replace(':','').replace('：','')
        # Heading paragraph: contains a keyword AND ends with one.
        if any(kw in text for kw in section_keywords) and text.endswith(tuple(section_keywords)):
            capturing = True
            res_p_lis = p.xpath('./following-sibling::*')
            break
        if any(kw in text for kw in section_keywords):
            # Inline form: "…应用领域：…。" — heading line already holds the body.
            if (':' in text or '：' in text) and len(text) > 6 and text.endswith(tuple(['。','；'])):
                return text
            if is_application_field(text):
                return text
            # Short keyword-bearing line: treat it as a heading and capture siblings.
            if len(text) < 21:
                capturing = True
                res_p_lis = p.xpath('./following-sibling::*')
                break


    if capturing:
        # Site-specific layout: the body may live in the next
        # div.lh_cpdetailtext instead of in sibling paragraphs.
        # `p` here is the heading element the loop broke on.
        text_first = p.xpath('.//following::div[@class="lh_cpdetailtext"][1]//text()')
        if text_first:
            return ''.join(text_first).strip()
    if capturing:
        for res_p in res_p_lis:
            text = ''.join(res_p.xpath('.//text()')).strip().replace('\xa0', '').replace(' ', '')
            # Stop paragraph: the next section's heading.
            if any(stop_kw in text for stop_kw in stop_keywords):
                break
            # Or: this paragraph looks like a title (short, no sentence punctuation).
            if len(text) < 15 and ('。' not in text and '，' not in text):
                if '：' in text or ':' in text or text == '':
                    break
                # Very short plain line — likely a stray heading, unless it is
                # a '·'/'Ø' bullet item.
                if len(text) < 7 and not text.startswith(tuple(['·','Ø'])):
                    break
                # Punctuation-free non-bullet line: keep it, then stop.
                if has_no_punctuation(text) and not text.startswith(tuple(['·','Ø'])):
                    result.append(text)
                    break
            # Body content: keep it.
            result.append(text)

    return '\n'.join(result)


def extract_application_fields(html_content, section_keywords):
    """Fallback extractor: pull the application-field text out of product HTML.

    Finds the first <p> whose text mentions any of *section_keywords* AND
    contains a <br>, then returns the concatenated text that follows that
    first <br> inside the paragraph.

    Args:
        html_content (str): HTML string of the product description.
        section_keywords (list): keyword substrings marking the section.

    Returns:
        str: the extracted text, or '' when no suitable paragraph exists.
    """
    soup = BeautifulSoup(html_content, 'html.parser')

    for paragraph in soup.find_all('p'):
        # Skip paragraphs that do not mention any section keyword.
        if not any(kw in paragraph.get_text() for kw in section_keywords):
            continue
        line_break = paragraph.find('br')
        if not line_break:
            # Keyword paragraph without a <br>: keep scanning later paragraphs.
            continue
        # Gather every plain-text node that follows the first <br>.
        pieces = [node.strip() for node in line_break.next_siblings
                  if isinstance(node, str)]
        return ''.join(pieces).strip()

    return ""


def get_application(html):
    """Extract the "application field" section text from a product page.

    Tries the lxml-based section extractor first; when it yields an empty
    result, falls back to the BeautifulSoup <br>-based extractor.

    Args:
        html (str): raw HTML of the product page.

    Returns:
        str: the extracted section text (may be '').
    """
    section_keywords = ['应用领域', '应用范围', '适应行业', '适用范围']
    stop_keywords = ['产品特点', '联系方式', '公司简介', '技术参数']

    # extract_section returns '' on no match, so `or` short-circuits
    # exactly like the original "if not content" fallback.
    return (extract_section(html, section_keywords, stop_keywords)
            or extract_application_fields(html, section_keywords))