import math
import re
import time

import html2text
import requests
from bs4 import BeautifulSoup
from langchain_community.llms.ollama import Ollama
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

# Local models available from the Ollama server.
model = ['phi3', 'llava-phi3', 'dolphin-llama3']

# LLM client bound to the local Ollama daemon, using dolphin-llama3.
ollama = Ollama(base_url='http://localhost:11434', model=model[2])
class HtmlControl:
    """Helpers for fetching a web page and extracting its main text content."""

    @staticmethod
    def getHtmlContent(url="https://zhuanlan.zhihu.com/p/493383590"):
        """Fetch *url* and return ``(main_content, title)``.

        First performs a plain HTTP GET; paragraphs are scored by a
        text-density heuristic and the top 30% are joined with newlines.
        If that yields nothing (typically a JavaScript-rendered page),
        falls back to a headless Chrome session via Selenium and converts
        the located element's text through html2text.

        Returns ``None`` when the response body cannot be decoded as UTF-8.
        """
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/126.0.0.0 Safari/537.36 Edg/126.0.0.0"
        }
        # Timeout so an unresponsive host cannot hang the caller forever.
        response = requests.get(url, headers=headers, timeout=10)
        try:
            html = response.content.decode('utf-8')
        except UnicodeDecodeError:
            # Keep the original best-effort contract: give up quietly.
            return

        soup = BeautifulSoup(html, 'html.parser')
        # <title> text from <head>; empty when either tag is missing.
        try:
            title = soup.head.title.text.strip()
        except AttributeError:
            title = ''

        # Strip links (navigation, ads) before measuring paragraph density.
        for a_tag in soup.find_all('a'):
            a_tag.extract()
        # Prefer <article>; fall back to the whole document.
        div_element = soup.select_one('article')
        if not div_element:
            div_element = soup.select_one('html')

        # Score each paragraph: total length per CJK character.
        # NOTE(review): this is chars-per-Chinese-char, so the descending
        # sort below favours paragraphs with FEWER Chinese characters per
        # unit length — confirm this inversion is intentional.
        paragraphs = div_element.find_all('p') if div_element else []
        density_scores = []
        for paragraph in paragraphs:
            text = paragraph.get_text(strip=True)
            word_count = len(re.findall(r'[\u4e00-\u9fff]', text))
            density = len(text) / word_count if word_count > 0 else 0
            density_scores.append((paragraph, density))

        # Rank paragraphs by density score, highest first.
        density_scores.sort(key=lambda x: x[1], reverse=True)

        # Keep the top 30% of paragraphs (adjust the ratio as needed).
        top_paragraphs = density_scores[:math.ceil(len(density_scores) * 0.3)]

        # Join the surviving paragraphs into the extracted body text.
        main_content = '\n'.join(
            paragraph.get_text(strip=True) for paragraph, density in top_paragraphs
        )

        if main_content == '':
            # Static fetch produced nothing — likely a JS-rendered page.
            # Retry with headless Chrome so scripts get a chance to run.
            options = Options()
            options.add_argument('--headless')
            driver = webdriver.Chrome(options=options)
            try:
                driver.get(url)
                html = driver.page_source
                soup = BeautifulSoup(html, 'html.parser')

                div_element = soup.select_one('article')
                deadline = time.time() + 5  # poll for at most 5 seconds
                while not div_element and time.time() < deadline:
                    try:
                        html = driver.page_source
                        soup = BeautifulSoup(html, 'html.parser')
                        div_element = soup.select_one('article')
                    except Exception:
                        div_element = None
                    # Brief pause instead of a tight busy-wait on the driver.
                    time.sleep(0.1)

                if not div_element:
                    div_element = soup.select_one('html')

                # <title> from the rendered page; empty when missing.
                try:
                    title = soup.head.title.text.strip()
                except AttributeError:
                    title = ''

                markdown = html2text.html2text(div_element.text)
                return markdown.strip('\t'), title
            finally:
                # quit() (not close()) terminates the whole browser process,
                # and the finally block guarantees cleanup even on errors.
                driver.quit()
        return main_content.strip('\t'), title


if __name__ == '__main__':
    # Ad-hoc smoke test: fetch and extract the default article URL.
    HtmlControl.getHtmlContent()
