import re

from bs4 import BeautifulSoup

from frame.library.crawl4ai import *

# Module-level default configurations shared by Crawl4aiUtil.
# Default browser: headless Chromium with a desktop Chrome user agent.
browser_config = BrowserConfig(
    browser_type="chromium",
    user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/138.0.0.0 Safari/537.36",
    headless=True
)

# Default per-crawl settings.
crawler_run_config = CrawlerRunConfig(
    page_timeout=30000,  # 30 seconds
    delay_before_return_html=0,  # seconds to wait before returning the HTML
)


class Crawl4aiUtil:
    """Thin wrapper around crawl4ai's AsyncWebCrawler.

    Fetches a page and returns either its plain-text content
    (:meth:`get_text_content`) or its markdown rendering
    (:meth:`get_markdown_content`).
    """

    def __init__(self, wait_time=0):
        # Seconds to wait before the crawler returns the page HTML
        # (lets client-side JavaScript finish rendering).
        self.wait_time = wait_time

    async def _fetch(self, url, user_agent):
        """Crawl ``url`` and return the raw crawl4ai result object.

        Uses per-call clones of the module-level configs instead of
        mutating them, so concurrent calls (and instances with different
        wait times) do not leak settings into each other.

        :param url: page URL to crawl.
        :param user_agent: optional User-Agent override; lowercased to
            match the original normalization behavior.
        """
        # Default wait is 0 (no extra delay), matching __init__'s default.
        wait_time = int(self.wait_time) if self.wait_time is not None else 0
        run_config = crawler_run_config
        if wait_time > 0:
            run_config = crawler_run_config.clone(delay_before_return_html=wait_time)

        b_config = browser_config
        if user_agent:
            b_config = browser_config.clone(user_agent=user_agent.lower())

        async with AsyncWebCrawler(config=b_config) as crawler:
            return await crawler.arun(url=url, config=run_config)

    async def get_text_content(self, url, user_agent=None):
        """Crawl the page and return its visible text content.

        :param url: page URL to crawl.
        :param user_agent: optional User-Agent override (default: None).
        :return: cleaned plain-text content of the page.
        """
        result = await self._fetch(url, user_agent)

        # Convert the raw HTML to plain text.
        soup = BeautifulSoup(result.html, 'html.parser')

        # Drop elements that carry no readable content.
        for element in soup(['script', 'style', 'noscript', 'svg', 'nav', 'footer', 'head', 'meta']):
            element.decompose()

        text = soup.get_text(separator='\n', strip=True)
        return self._clean_text(text)

    async def get_markdown_content(self, url, user_agent=None):
        """Crawl the page and return its content rendered as markdown.

        :param url: page URL to crawl.
        :param user_agent: optional User-Agent override (default: None).
        :return: markdown rendering produced by crawl4ai.
        """
        result = await self._fetch(url, user_agent)
        return result.markdown

    def _clean_text(self, text):
        """Collapse all whitespace runs to single spaces and strip
        ASCII control characters, then trim the result."""
        text = re.sub(r'\s+', ' ', text)
        text = re.sub(r'[\x00-\x1F\x7F]', '', text)
        return text.strip()
