from .parser import Parser
from .core.models import NewsItem, Request, Response, Selectors, SpiderConfig, API
from .core.router import Router
from .utils import get_url_extension, markdownify_with_images
from typing import Literal, List, Optional, Self, AsyncGenerator
from playwright.async_api import async_playwright, Page
from pathlib import Path
import asyncio
from urllib.parse import urlparse, urlsplit, urlunsplit, urljoin
from parsel import Selector as Locator
import markdownify


class CrawlerBase:
    """Base class holding shared crawler configuration.

    ``default_negative_selectors`` enumerates CSS selectors for page elements
    that never carry article content — share/like widgets, comment modules,
    related-article boxes and other site chrome — and is consumed when
    stripping pages before extraction.
    """

    # Built from themed groups; concatenation preserves the original flat order.
    default_negative_selectors: List[str] = (
        # Media players and audio widgets
        ["video", ".article-audio", ".backword"]
        # Interaction buttons / counters and QR codes
        + [".flex-row .icon-button", ".like-button", ".like-count", ".pc-qrcode__img"]
        # data-testid tagged report / recommendation / related widgets
        + [
            "[data-testid*='report-btn']",
            "[data-testid*='search-rec']",
            "[data-testid*='relate']",
        ]
        # Interaction bars and the comment module
        + [".interact-btn", ".interact-desc", "#commentModule"]
        # Article-page chrome (navigation, statements, follow/report buttons)
        + [
            ".post_top", ".post_next", ".post_wemedia", ".post_side_mod",
            ".post_crumb", ".post_statement", ".qqcom-follow-button", "a.report",
        ]
        # Social share items
        + [".item-wechat", ".item-comment", ".item-weibo"]
        # Floating containers, ranking cards and share overlays
        + [
            ".fixContainer", ".bangdan-mode", ".card-item-content",
            ".card-item-more", ".suspension-share",
        ]
        # User interaction boxes, related news, error page, copyright footer
        + [
            ".user-operate-box", ".user-comment", ".related-news-container",
            ".page-404", ".copyright",
        ]
    )


class Crawler(CrawlerBase):
    """Playwright-backed crawler.

    Fetches pages with headless Chromium, strips boilerplate elements
    (``default_negative_selectors`` plus caller-supplied ones), optionally
    runs a readability pass, and parses the cleaned page into structured
    fields.  :meth:`crawl` drives a bounded breadth-first crawl over an
    allowed set of domains/paths.
    """

    def __init__(
        self,
        wait_until: Literal[
            "domcontentloaded", "load", "networkidle"
        ] = "domcontentloaded",
        selectors: Optional[dict] = None,
        negative_selectors: Optional[List[str]] = None,
        use_readability: bool = True,
        router_config_path: Optional[str] = None,
        headless: bool = True,
        keep_page: bool = False,
    ):
        """
        :param wait_until: Playwright load state to wait for after navigation.
        :param selectors: optional kwargs used to build a :class:`Selectors`
            override; falls back to the default :class:`Selectors`.
        :param negative_selectors: extra CSS selectors to strip from pages;
            the class-level defaults are always appended.
        :param use_readability: run the bundled readability JS on each page.
        :param router_config_path: config file for the selector :class:`Router`.
        :param headless: launch Chromium headless.
        :param keep_page: defaults to False.  When True, the Playwright Page
            object is kept on the Response and handed to the parser instead of
            parsing a ``page.content()`` snapshot — mainly for shadow-DOM
            pages.
        """
        self.wait_until = wait_until
        self.use_readability = use_readability
        self.headless = headless
        self.keep_page = keep_page
        # Set once the configured max request count is reached (see crawl()).
        self.event = asyncio.Event()
        self.selectors = Selectors(**selectors) if selectors else Selectors()
        self.router = Router(router_config_file=router_config_path)
        # negative_selectors marks page elements to delete before parsing.
        # Copy the caller's list so extend() never mutates their argument.
        self.negative_selectors = list(negative_selectors or [])
        self.negative_selectors.extend(self.default_negative_selectors)
        # Crawl state.
        self.request_queue = asyncio.Queue()
        self.result_queue = asyncio.Queue()
        self.visited_urls = set()
        self.visited_urls_lock = asyncio.Lock()
        self.request_nums = 0
        self.request_nums_lock = asyncio.Lock()
        self.browser = None
        # Playwright driver handle, kept so __aexit__ can stop it.
        self._playwright = None
        self.apis: List[API] = []

    async def __aenter__(self) -> Self:
        await self.init_playwright()
        return self

    async def __aexit__(self, *args, **kwargs):
        await self.browser.close()
        # Also stop the Playwright driver; closing only the browser would
        # leak the driver subprocess.
        if self._playwright is not None:
            await self._playwright.stop()

    async def init_playwright(self):
        """Start the Playwright driver and launch the Chromium browser."""
        self._playwright = await async_playwright().start()
        self.browser = await self._playwright.chromium.launch(headless=self.headless)

    async def fetch(
        self,
        url: str | Request,
        meta: Optional[dict] = None,
        return_fields: Optional[
            List[
                Literal[
                    "content",
                    "markdown",
                    "title",
                    "url",
                    "raw_url",
                    "text_content",
                    "published_time",
                    "navigators",
                    "byline",
                    "length",
                    "next_urls",
                ]
            ]
        ] = None,
        config: Optional[SpiderConfig] = None,
    ):
        """Open ``url`` in the browser, clean it and return structured data.

        :param url: plain URL, or a :class:`Request` whose meta is merged in
            (the caller-supplied ``meta`` wins on conflicts).
        :param meta: per-request metadata forwarded to :meth:`goto`.
        :param return_fields: fields to include; defaults to ``["content"]``.
            ``"markdown"`` is derived from ``"content"``; ``"next_urls"``
            collects all in-page links that pass :meth:`filter`.
        :param config: spider config, used only when collecting "next_urls".
        :return: dict with the requested fields.
        """
        meta = meta or {"use_readability": self.use_readability}
        assert self.browser
        return_fields = return_fields or ["content"]
        if isinstance(url, Request):
            req: Request = url
            url = str(req.url)
            if req.meta:
                # BUG FIX: dict.update() returns None, so the previous
                # `meta = req.meta.update(meta)` left meta as None and the
                # assignment below crashed.  Merge explicitly instead.
                merged = dict(req.meta)
                merged.update(meta)
                meta = merged
        meta["keep_page"] = self.keep_page
        response = await self.goto(url, meta)
        # A route-specific selector set (if any) overrides the default.
        selectors = self.router.dispatch(response) or self.selectors
        parser = Parser(selectors)
        item = await parser.parse(response)
        item = item.model_dump()
        ret = {key: value for key, value in item.items() if key in return_fields}
        if "markdown" in return_fields:
            ret["markdown"] = markdownify_with_images(item["content"])
        if "next_urls" in return_fields:
            next_urls = []
            locator = Locator(text=response.content)
            for href in locator.xpath("//a/@href").getall():
                # Normalise: resolve relative links against the final URL,
                # drop the fragment and any trailing slash so duplicates
                # compare equal.
                split_url = urlsplit(urljoin(str(response.url), href))
                normalized = urlunsplit(split_url._replace(fragment="")).rstrip("/")
                if await self.filter(normalized, config=config):
                    next_urls.append(normalized)
            ret["next_urls"] = next_urls
        return ret

    async def crawl(
        self,
        start_urls: str | List[str],
        allowed_domains: List[str],
        max_depth: int = 3,
        max_requests: int = 10,
        max_workder: int = 4,
        allowed_paths: Optional[List[str]] = None,
        blocked_paths: Optional[List[str]] = None,
        return_fields: Optional[
            List[
                Literal[
                    "content",
                    "markdown",
                    "title",
                    "url",
                    "raw_url",
                    "text_content",
                    "published_time",
                    "navigators",
                    "byline",
                    "length",
                    "next_urls",
                ]
            ]
        ] = None,
    ) -> AsyncGenerator[dict, None]:
        """Breadth-first crawl from ``start_urls``, yielding one result dict
        per fetched page.

        Stops when ``max_requests`` pages have been fetched, or when the
        request queue and every registered API source have drained.

        :param start_urls: a single URL or a list of URLs.
        :param allowed_domains: netlocs the crawl may visit (WeChat articles
            are always allowed — see :meth:`filter`).
        :param max_depth: maximum link depth from a start URL.
        :param max_requests: hard cap on fetched pages.
        :param max_workder: number of concurrent fetch workers.  (Name kept,
            typo and all, for backward compatibility with existing callers.)
        :param allowed_paths: "netloc+path" prefixes a URL must match, if set.
        :param blocked_paths: "netloc+path" prefixes that reject a URL.
        :param return_fields: fields per yielded item; defaults to
            ``["url", "title", "text_content"]``.
        """
        if isinstance(start_urls, str):
            # BUG FIX: a bare string would otherwise be iterated
            # character by character below.
            start_urls = [start_urls]
        return_fields = return_fields or ["url", "title", "text_content"]
        config = SpiderConfig(
            allowed_domains=allowed_domains,
            allowed_paths=allowed_paths or [],
            blocked_paths=blocked_paths or [],
        )
        for url in start_urls:
            if await self.filter(url, config):
                await self._mark_visited(url)
                await self.request_queue.put(Request(url=url, depth=1))
        for _ in range(max_workder):
            asyncio.create_task(
                self.crawl_worker(max_depth, max_requests, return_fields, config)
            )
        self.register_apis(self.apis, config)
        while True:
            try:
                await asyncio.wait_for(self.event.wait(), timeout=1)
                # Maximum request count reached.
                break
            except asyncio.TimeoutError:
                pass
            try:
                # No timeout here means every queued request has completed...
                await asyncio.wait_for(self.request_queue.join(), timeout=1)
                # ...then make sure every registered API has finished too.
                for api in self.apis:
                    await asyncio.wait_for(api.event.wait(), timeout=1)
                break
            except asyncio.TimeoutError:
                # Work still in flight: drain one result (if any) and loop.
                await asyncio.sleep(1)
                try:
                    result = await asyncio.wait_for(self.result_queue.get(), 3)
                    yield result
                except asyncio.TimeoutError:
                    pass
        # Flush any results produced after the loop decided to stop.
        while not self.result_queue.empty():
            yield await self.result_queue.get()

    async def crawl_worker(self, max_depth, max_requests, return_fields, config):
        """Worker loop: pop Requests from the queue, fetch them, push results
        to ``result_queue`` and enqueue newly discovered links.  Runs until
        its task is cancelled."""
        while True:
            try:
                req: Request = await asyncio.wait_for(self.request_queue.get(), 10)
            except asyncio.TimeoutError:
                await asyncio.sleep(1)
                continue
            try:
                if req.depth > max_depth:
                    continue
                # Always request next_urls so the frontier keeps growing,
                # regardless of what the caller asked for.
                fetch_fields = return_fields.copy()
                fetch_fields.append("next_urls")
                item = await self.fetch(
                    req, return_fields=fetch_fields, config=config
                )
                async with self.request_nums_lock:
                    self.request_nums += 1
                    if self.request_nums >= max_requests:
                        # Signal crawl() to stop.
                        self.event.set()
                await self.result_queue.put(item)
                for url in item["next_urls"]:
                    if await self.filter(url, config):
                        await self._mark_visited(url)
                        await self.request_queue.put(
                            Request(url=url, depth=req.depth + 1)
                        )
            except Exception:
                # Best effort: one failing page must not kill the worker.
                pass
            finally:
                self.request_queue.task_done()

    async def goto(self, url, meta: Optional[dict] = None):
        """Open ``url`` in a fresh page, wait for it to settle, clean it and
        wrap the result in a :class:`Response`.

        The page is closed unless ``meta["keep_page"]`` is truthy, in which
        case it stays attached to the Response for downstream parsing.
        """
        meta = meta or {}
        browser = self.browser
        page = await browser.new_page(bypass_csp=True, ignore_https_errors=True)
        # Inject before any page script runs: force every attachShadow() call
        # to use mode "open" so shadow roots stay inspectable.
        await page.add_init_script(
            """
            // 保存原始方法
            const originalAttachShadow = Element.prototype.attachShadow;
            // 重写方法，强制 mode 为 open
            Element.prototype.attachShadow = function(options) {
                const openOptions = { ...options, mode: 'open' };
                return originalAttachShadow.call(this, openOptions);
            };
        """
        )
        await page.goto(url)
        try:
            await asyncio.wait_for(page.wait_for_load_state(self.wait_until), 10)
        except asyncio.TimeoutError:
            # Best effort: proceed with whatever has loaded so far.
            pass
        await page.wait_for_selector("body")
        # Grace period for late JS-rendered content.
        await asyncio.sleep(3)
        await self.clean_page(page)
        article = await self.call_readability(page) if self.use_readability else {}
        meta["article"] = article
        html = await page.content()
        request = Request(url=url, meta=meta)
        # page.url (not the input url) so redirects are reflected.
        response = Response(request=request, url=page.url, content=html)
        if meta.get("keep_page"):
            response.page = page
        else:
            await page.close()
        return response

    async def call_readability(self, page: Page):
        """Inject the bundled readability script into ``page`` and return the
        result of its ``parse_page()`` entry point."""
        js_path = (Path(__file__).parent / "data" / "html_cleaner.js").resolve()
        js_code = js_path.read_text(encoding="utf-8")
        await page.add_script_tag(content=js_code)
        return await page.evaluate("parse_page()")

    async def clean_page(self, page: Page):
        """Remove unwanted elements (share buttons, comments, site chrome...)
        matching ``self.negative_selectors`` from the live page.

        Elements are removed one at a time, re-querying after each removal,
        because deleting a node invalidates previously resolved handles.
        """
        negative_selectors = ", ".join(self.negative_selectors)
        while await page.locator(negative_selectors).all():
            ele = page.locator(negative_selectors).nth(0)
            await ele.evaluate("el => el.remove()")

    async def _mark_visited(self, url: str) -> None:
        """Record ``url`` so :meth:`filter` rejects it on later encounters.

        BUG FIX: previously nothing ever populated ``visited_urls``, so the
        dedup check in filter() was dead.  Callers mark a URL at the moment
        it is enqueued (not inside filter(), which is intentionally called
        twice per discovered link)."""
        async with self.visited_urls_lock:
            self.visited_urls.add(url)

    async def filter(self, url, config: Optional[SpiderConfig] = None) -> bool:
        """Return True if ``url`` should be crawled.

        Rejects URLs outside the allowed domains (WeChat article URLs are
        always accepted), already-visited URLs, URLs failing the path
        allow/block prefix lists, and links to binary assets.
        """
        url_parsed = urlparse(url)
        # Path rules match against "netloc + path".
        full_url: str = url_parsed.netloc + url_parsed.path
        # BUG FIX: tolerate config=None (reachable via fetch() called
        # directly with "next_urls" in return_fields and no config).
        if config is not None and config.allowed_domains:
            # WeChat official-account articles are whitelisted unconditionally.
            if url_parsed.netloc == "mp.weixin.qq.com":
                return True
            if url_parsed.netloc not in config.allowed_domains:
                return False
        async with self.visited_urls_lock:
            if url in self.visited_urls:
                return False
        if config is not None:
            # Allowed-path prefixes (matched on full netloc+path).
            if config.allowed_paths and not any(
                full_url.startswith(p) for p in config.allowed_paths
            ):
                return False
            # Blocked-path prefixes (matched on full netloc+path).
            if config.blocked_paths and any(
                full_url.startswith(p) for p in config.blocked_paths
            ):
                return False
        if get_url_extension(url) in {
            "jpg",
            "jpeg",
            "png",
            "bmp",
            "ico",
            "svg",
            "eps",
            "jfif",
            "pdf",
            "doc",
            "docx",
            "xlsx",
            "xls",
            "mp4",
        }:
            # Images / documents / video are not worth queueing.
            return False
        return True

    def register_apis(self, apis: List[API], config):
        """Start every API source as a background task, wiring each one a
        callback that filters and enqueues the Requests it produces."""

        async def add_request(req: Request):
            url = str(req.url)
            if await self.filter(url, config):
                await self._mark_visited(url)
                await self.request_queue.put(Request(url=url, depth=req.depth + 1))

        for api in apis:
            api.add_request = add_request
            api.event = asyncio.Event()
            asyncio.create_task(api.run())