"""
## 注意：
    如果采用有头模式，会将浏览器移出桌面显示区域从而不可见。
"""
from playwright.sync_api import sync_playwright, Page
from typing import Mapping
from pydantic import BaseModel, Field, HttpUrl
import re
from pathlib import Path
from html_to_markdown import convert_to_markdown
from bs4 import Tag, NavigableString
from html_to_markdown.converters import Converter


class LinkTextOnlyConverter(Converter):
    """Custom ``<a>`` converter for html_to_markdown.

    Behaviour:
      - hrefs of length <= 100 are kept as Markdown links ``[text](href)``
      - missing / ``javascript:`` / over-long hrefs collapse to the bare text

    Accepts several call signatures for compatibility with different
    library versions:
      - (el, text, convert_as_inline=False, **kwargs)
      - (text, convert_as_inline=False, **kwargs)
      - (el,) alone, or keyword-only via ``el`` / ``tag`` / ``text``
    """

    def __call__(self, *args, **kwargs):
        el, text = self._split_arguments(args, kwargs)

        # Normalise whatever we got into a plain, stripped string.
        if isinstance(text, (Tag, NavigableString)):
            text = text.get_text()
        text = (text or "").strip()

        href = ""
        if isinstance(el, Tag):
            href = el.get("href", "") or ""

        # No usable target: emit text only (trailing space keeps words apart).
        if not href or href.lower().startswith("javascript:"):
            return f"{text} "

        # Short enough to keep as a real hyperlink.
        if len(href) <= 100:
            return f"[{text}]({href})" if text else f"<{href}>"

        # Link target too long: drop it, keep the words.
        return f"{text} "

    @staticmethod
    def _split_arguments(args, kwargs):
        """Resolve an ``(el, text)`` pair from the supported call shapes."""
        if len(args) >= 2:
            first, second = args[0], args[1]
            if isinstance(first, Tag):
                return first, second or ""
            return None, first or ""

        if len(args) == 1:
            only = args[0]
            if isinstance(only, Tag):
                return only, only.get_text() or ""
            return None, only or ""

        el = kwargs.get("el") or kwargs.get("tag")
        text = kwargs.get("text") or ""
        if el is not None and not text:
            try:
                text = el.get_text()
            except Exception:
                text = str(el)
        return el, text

    # Adapter for the library's ``convert`` entry point.
    def convert(self, el, text, convert_as_inline=False, **kwargs):
        if text is None and isinstance(el, Tag):
            text = el.get_text()
        return self.__call__(el, text, convert_as_inline, **kwargs)


# Compiled once: XPath-only functions and axes that never appear in CSS
# selectors (text(), contains(), starts-with(), axes, position()).
_XPATH_FEATURE_RE = re.compile(
    r"text\(\)|contains\(|starts-with\(|ancestor::|"
    r"following-sibling::|preceding-sibling::|position\(\)"
)
# XPath attribute access: @name, or [@name=...] inside a predicate.
# (CSS attribute selectors like [name="x"] never contain "@".)
_XPATH_ATTR_RE = re.compile(r"@\w+|\[\s*@")


def is_css_or_xpath(expression: str) -> str:
    """Classify a selector string as a CSS selector or an XPath expression.

    Args:
        expression: Candidate selector (CSS selector or XPath expression).

    Returns:
        ``'css'`` for CSS selectors, ``'xpath'`` for XPath expressions,
        ``'unknown'`` only for empty/blank input.
    """
    # Strip surrounding whitespace so it cannot skew the prefix checks.
    expr = expression.strip()
    if not expr:
        return "unknown"

    # Feature 1: XPath paths start with "/" (absolute) or "//" (anywhere);
    # a single "/" prefix covers both.
    if expr.startswith("/"):
        return "xpath"

    # Feature 2: "@attr" / "[@attr" is XPath attribute syntax.
    if _XPATH_ATTR_RE.search(expr):
        return "xpath"

    # Feature 3: XPath-only functions or axes.
    if _XPATH_FEATURE_RE.search(expr):
        return "xpath"

    # Everything else — ".class", "#id", "[name=x]", bare tag names — is
    # treated as CSS, the far more common case for this module's callers.
    return "css"


class FetchResult(BaseModel):
    """Result of fetching one page: the URL plus the extracted content."""

    url: HttpUrl = Field(description="URL")
    # Markdown converted from the cleaned page body; empty when not produced.
    markdown: str = Field(description="解析出的Markdown", default="")
    # Raw HTML snapshot captured right after the initial page load.
    html: str = Field(description="html", default="")

class Fetcher:
    """Fetch a web page with Playwright, clean it, and convert it to Markdown.

    NOTE(review): in headed mode the browser window is moved off-screen via
    ``--window-position=-32000,-32000`` unless ``visible=True``.
    """

    def __init__(self, proxy=None, stealth=False, headless=True, visible=False):
        # proxy: e.g. "socks5://localhost:21001"; None disables proxying.
        self.proxy = proxy
        # stealth: inject data/stealth.min.js to mask automation fingerprints.
        self.stealth = stealth
        self.headless = headless
        # visible: keep the window on-screen when running headed.
        self.visible = visible

    def get(self, url) -> Mapping:
        """Fetch *url*, clean the DOM, and return {"url", "markdown", "html"}.

        If the top-level request answers 404/403, returns the raw HTML with
        ``markdown`` set to None instead of converting.
        """
        original_request_failed = False

        def monitor_origin_request(response):
            # Flag the fetch as failed when the original request 404s/403s.
            nonlocal original_request_failed
            if response.request.url == url and response.status in [404, 403]:
                original_request_failed = True
                # page.stop_loading()

        with sync_playwright() as p:
            anti_anti_spider_args = [
                "--disable-blink-features=AutomationControlled",
                "--no-sandbox",
                "--start-maximized",
                "--window-position=-32000,-32000", "--window-size=1920,1080"]
            if self.visible:
                anti_anti_spider_args.remove("--window-position=-32000,-32000")
            # Build the proxy config only when a proxy was actually supplied.
            launch_kwargs = {"args": anti_anti_spider_args, "headless": self.headless}
            if self.proxy:
                launch_kwargs["proxy"] = {
                    "server": self.proxy,
                    "bypass": "localhost,127.0.0.1,*.local",
                }
            browser = p.chromium.launch(**launch_kwargs)
            context = browser.new_context(
                java_script_enabled=True,  # keep only the JS we need
            )
            if self.stealth:
                # Fingerprint emulation to evade automation detection.
                js_path = (
                    Path(__file__).parent / Path("./data/stealth.min.js")
                ).resolve()
                with open(js_path, encoding="utf-8") as f:
                    js_code = f.read()
                context.add_init_script(js_code)
            page = context.new_page()

            page.on("response", monitor_origin_request)
            page.goto(url)
            origin_content = page.content()
            if original_request_failed:
                return {"url": url, "markdown": None, "html": origin_content}
            self.clean_page(page, ["._3PLyv", ".goog-te-gadget", "//div[contains(@id, 'targetLanguage')]", "nav", ".site-header-item", ".footer-html", ".nav", "svg", ".menu", "#language", "#top_nav", "#google_translate_element", "script"])
            # Run the readability script for its page-side effects; its parsed
            # article is not used (Markdown is built from the body below).
            self.call_readability(page=page)
            content = page.locator("body").inner_html()
            markdown = convert_to_markdown(
                content, custom_converters={"a": LinkTextOnlyConverter()}, strip=["img"]
            )
            item = FetchResult(url=url, markdown=markdown, html=origin_content)
            return item.model_dump()

    def clean_page(self, page: Page, negative_selectors=()):
        """Strip unwanted elements (nav, footers, scripts, ...) from the page.

        Args:
            page: The Playwright page to mutate in place.
            negative_selectors: Mixed CSS selectors and XPath expressions;
                each is classified with is_css_or_xpath and handled separately.
        """
        css_selectors = ", ".join(
            s for s in negative_selectors if is_css_or_xpath(s) == "css"
        )
        # Removal changes the match set, so re-query and drop the first hit
        # until none remain. Guard: locator("") would raise on an empty join.
        if css_selectors:
            while page.locator(css_selectors).all():
                page.locator(css_selectors).nth(0).evaluate("el => el.remove()")
        for selector in (
            s for s in negative_selectors if is_css_or_xpath(s) == "xpath"
        ):
            try:
                # NOTE(review): Playwright timeouts are in milliseconds, so
                # 0.1 is effectively "no wait" — confirm whether 100 was meant.
                page.locator(selector).wait_for(state="attached", timeout=0.1)
            except Exception:
                # Selector absent on this page: skip it (best-effort cleanup).
                continue
            for ele in page.locator(selector).all():
                ele.evaluate("el => el.remove()")

    def call_readability(self, page: Page):
        """Inject data/html_cleaner.js and run its ``parse_page()`` on *page*."""
        js_path = (Path(__file__).parent / Path("./data/html_cleaner.js")).resolve()
        with open(js_path, encoding="utf-8") as f:
            js_code = f.read()
        page.add_script_tag(content=js_code)
        article = page.evaluate("parse_page()")
        return article


if __name__ == "__main__":
    from qwen import Qwen

    fetcher = Fetcher(proxy="socks5://localhost:21001", headless=True)
    # Target page (earlier debug URLs removed; only the last one took effect).
    url = "https://lapl.overdrive.com/media/12175145"
    result = fetcher.get(url)
    print(result["markdown"])
    prompt = (
        "下面内容来源于网页，请进行梳理，保留关键信息（尤其是员工和联系方式信息），整理成markdown格式。\n%s"
        % result["markdown"]
    )
    # SECURITY: hard-coded API key committed to source — rotate this key and
    # load it from an environment variable or secrets store instead.
    qwen = Qwen("qwen-plus", "sk-3a35db22d0a64e4f9f6ccb9fdc7cf3e4")
    ret = qwen.ask(prompt)
