import platform
import time
import requests
import re
from bs4 import BeautifulSoup
from playwright.sync_api import sync_playwright
from ..exception.ApiException import ApiException


# Scraper utility class (static fetch with dynamic-rendering fallback)
class PlaywrightUtil:
    def __init__(self):
        self.min_content_length = 200

    # 异步方法
    def scrape(self, url, wait_time):
        try:
            # 先尝试静态抓取
            static_text = self.scrape_static_content(url)
            if static_text and len(static_text) > self.min_content_length:
                return static_text

            # 静态失败时启用动态渲染
            dynamic_text = self.scrape_dynamic_content(url, wait_time)
            if dynamic_text:
                return dynamic_text
        except ApiException as e:
            raise e
        except Exception as e:
            raise ApiException(msg=str(e))

    # 文本清洗优化
    def _clean_text(self, text):
        text = re.sub(r'\s+', ' ', text)
        text = re.sub(r'[\x00-\x1F\x7F]', '', text)
        return text.strip()

    # 爬取静态内容
    def scrape_static_content(self, url):
        try:
            response = requests.get(url, timeout=10)
            response.encoding = response.apparent_encoding  # 自动检测编码
            soup = BeautifulSoup(response.text, 'html.parser')

            # 移除不需要的元素
            for element in soup(['script', 'style', 'noscript', 'svg', 'nav', 'footer', 'head', 'meta']):
                element.decompose()

            # 获取纯文本
            text = soup.get_text(separator='\n', strip=True)
            text = self._clean_text(text)

            return text
        except ApiException as e:
            raise e
        except Exception as e:
            raise ApiException(msg=str(e))

    # 抓取动态内容
    def scrape_dynamic_content(self, url, wait_time):
        try:
            with sync_playwright() as p:
                # browser = p.chromium.launch() # ubuntu系统 可以不用配置参数

                # 判断是否为 Windows 系统
                if platform.system() == 'Windows':
                    executable_path = "C:/Users/Administrator/AppData/Local/Google/Chrome/Bin/chrome.exe"
                else:
                    executable_path = None

                browser = p.chromium.launch(
                    headless=True,
                    executable_path=executable_path,
                    args=[
                        "--no-sandbox",
                        "--disable-dev-shm-usage",
                        "--ignore-certificate-errors",
                        "--ignore-urlfetcher-cert-requests"
                    ]
                )  # 无头模式

                browser = browser.new_context(ignore_https_errors=True)

                page = browser.new_page()

                # 设置超时和加载策略
                page.set_default_timeout(60000)  # 60秒超时
                page.goto(url, wait_until="domcontentloaded")

                wait_time = int(wait_time) if wait_time is not None else 1  # 设置默认值为 0

                # 等待动态内容加载（根据实际需求调整）
                if wait_time > 0:
                    time.sleep(wait_time)  # 简单等待，可替换为 page.wait_for_selector()
                else:
                    page.wait_for_selector('div')

                # 获取完整渲染后的 HTML
                html_content = page.content()
                soup = BeautifulSoup(html_content, 'html.parser')

                # # 移除不需要的元素
                for element in soup(['script', 'style', 'noscript', 'svg', 'nav', 'footer', 'head', 'meta']):
                    element.decompose()

                # 示例：提取标题（按需自定义解析逻辑）
                title = page.title()

                # 智能定位正文
                html_content = soup.find(['article', 'main', 'div.content']) or soup.body
                html_content = html_content.get_text(separator=' ', strip=True)

                browser.close()

                return html_content
        except ApiException as e:
            raise e
        except Exception as e:
            raise ApiException(msg=str(e))
