"""Resource Detector - Sniffs media resources from web pages."""

import asyncio
import json
import re
from dataclasses import dataclass

from loguru import logger
from playwright.async_api import Page, Request, Route

from .browser import BrowserController


@dataclass
class SniffedResource:
    """Represents a sniffed media resource from a web page."""

    url: str
    resource_type: str  # video, audio, m3u8, mpd, etc.
    size: int | None = None
    duration: float | None = None
    resolution: str | None = None
    title: str | None = None
    thumbnail: str | None = None
    headers: dict | None = None

    def to_dict(self) -> dict:
        """Converts the SniffedResource object to a dictionary."""
        return {
            "url": self.url,
            "resource_type": self.resource_type,
            "size": self.size,
            "duration": self.duration,
            "resolution": self.resolution,
            "title": self.title,
            "thumbnail": self.thumbnail,
            "headers": self.headers,
        }


class ResourceDetector:
    """Detects and sniffs media resources from web pages.

    Network requests are intercepted while a page loads; any request that
    looks like a media resource (by Playwright resource type, MIME type, or
    URL extension) is recorded as a SniffedResource. Results are cached per
    sniffed page URL.
    """

    # MIME types that identify a media payload.
    MEDIA_MIME_TYPES = {
        "video/mp4",
        "video/webm",
        "video/ogg",
        "video/x-matroska",
        "audio/mpeg",
        "audio/mp4",
        "audio/webm",
        "audio/ogg",
        "application/vnd.apple.mpegurl",  # m3u8
        "application/dash+xml",  # mpd
        "application/x-mpegURL",
    }

    # Regex fragments matching media file extensions inside URLs.
    MEDIA_URL_PATTERNS = [
        r"\.mp4",
        r"\.webm",
        r"\.m3u8",
        r"\.mpd",
        r"\.mp3",
        r"\.m4a",
        r"\.flv",
        r"\.ts",
        r"\.mkv",
    ]

    # Compiled once here instead of per-request inside _is_media_resource.
    _MEDIA_URL_REGEX = re.compile("|".join(MEDIA_URL_PATTERNS))

    def __init__(self, browser: BrowserController):
        """Initializes the ResourceDetector.

        Args:
            browser (BrowserController): The browser controller instance.
        """
        self.browser = browser
        # Cache of sniff results, keyed by the page URL that was sniffed.
        self._detected_resources: dict[str, list[SniffedResource]] = {}

    async def sniff_page(
            self, url: str, wait_time: int = 5, page_id: str | None = None
    ) -> list[SniffedResource]:
        """Sniffs media resources from the given URL.

        Args:
            url (str): The target URL to sniff.
            wait_time (int): Time to wait in seconds for additional resources to load.
            page_id (str | None): Optional page ID for the browser.

        Returns:
            list[SniffedResource]: A deduplicated list of sniffed resources.
        """
        logger.info(f"开始嗅探页面: {url}")

        # Derive a page id from the URL when the caller did not supply one.
        # NOTE: str hashes are randomized per process, so this id is only
        # stable within one run — fine, since the page is closed below.
        if page_id is None:
            page_id = f"sniff_{hash(url)}"

        page = await self.browser.new_page(page_id=page_id)
        resources: list[SniffedResource] = []

        # BUG FIX: page.route() handlers receive a Route, not a Request.
        # The original handler called request.continue_(), which does not
        # exist on Request, so request handling raised on every request.
        async def handle_route(route: Route):
            """Record media requests, then let every request proceed."""
            request = route.request
            if await self._is_media_resource(request):
                logger.debug(f"检测到媒体资源: {request.url}")
                resources.append(
                    SniffedResource(
                        url=request.url,
                        resource_type=self._determine_resource_type(request.url),
                        headers=dict(request.headers),
                    )
                )
            # Always let the request through so the page keeps loading.
            await route.continue_()

        # Intercept every network request made by the page.
        await page.route("**/*", handle_route)

        try:
            try:
                await page.goto(url, wait_until="networkidle", timeout=30000)
            except Exception as e:
                # Timeouts are expected on busy pages; keep what was caught.
                logger.warning(f"页面加载超时，继续嗅探: {e}")

            # Give JS-triggered players time to issue their media requests.
            await asyncio.sleep(wait_time)

            # Pick up results published by an injected helper script, if any
            # (e.g. the cat-catch extension).
            try:
                injected_results = await self._get_injected_results(page)
                if injected_results:
                    resources.extend(injected_results)
                    logger.info(f"从注入脚本获取到 {len(injected_results)} 个资源")
            except Exception as e:
                logger.debug(f"未能获取注入结果: {e}")
        finally:
            # Release the page even when sniffing fails part-way through
            # (the original leaked the page on any unexpected error).
            await self.browser.close_page(page_id)

        unique_resources = self._deduplicate_resources(resources)

        logger.success(f"嗅探完成，共发现 {len(unique_resources)} 个资源")
        self._detected_resources[url] = unique_resources

        return unique_resources

    async def _is_media_resource(self, request: Request) -> bool:
        """Determines if a given request is for a media resource.

        Args:
            request (Request): The Playwright request object.

        Returns:
            bool: True if the request is for a media resource, False otherwise.
        """
        # Playwright's own classification catches <video>/<audio> loads.
        if request.resource_type in ("media", "video", "audio"):
            return True

        # NOTE(review): content-type is normally a *response* header; on a
        # request it is only present when set by the client, so this check
        # rarely fires — kept for compatibility with the original behavior.
        content_type = request.headers.get("content-type", "")
        if any(mime in content_type for mime in self.MEDIA_MIME_TYPES):
            return True

        # Fall back to matching known media extensions anywhere in the URL.
        return self._MEDIA_URL_REGEX.search(request.url.lower()) is not None

    @staticmethod
    def _determine_resource_type(url: str) -> str:
        """Determines the resource type based on the URL.

        Args:
            url (str): The URL of the resource.

        Returns:
            str: The determined resource type (e.g., 'm3u8', 'mpd', 'video', 'audio', 'unknown').
        """
        url_lower = url.lower()

        # Streaming manifests are checked first: their URLs may also contain
        # segment extensions that would otherwise match "video".
        if ".m3u8" in url_lower:
            return "m3u8"
        if ".mpd" in url_lower:
            return "mpd"
        if any(ext in url_lower for ext in (".mp4", ".webm", ".mkv", ".flv")):
            return "video"
        if any(ext in url_lower for ext in (".mp3", ".m4a", ".ogg")):
            return "audio"
        return "unknown"

    async def _get_injected_results(self, page: Page) -> list[SniffedResource]:
        """Retrieves injected results from the page (e.g., from a browser extension).

        Args:
            page (Page): The Playwright page object.

        Returns:
            list[SniffedResource]: A list of sniffed resources from the injected
                script, or an empty list if none were found or parsing failed.
        """
        # The injected helper publishes its findings as JSON text inside
        # a well-known DOM element.
        script = """
        () => {
            const resultDiv = document.getElementById('swiftshadow-results');
            if (resultDiv) {
                return resultDiv.textContent;
            }
            return null;
        }
        """

        result_json = await page.evaluate(script)
        if not result_json:
            return []

        try:
            results = json.loads(result_json)
        except json.JSONDecodeError as e:
            logger.error(f"解析注入结果失败: {e}")
            return []

        # Guard against payloads that are valid JSON but not a list of
        # objects — the original crashed with AttributeError on those.
        if not isinstance(results, list):
            logger.error(f"解析注入结果失败: 预期列表，实际为 {type(results).__name__}")
            return []

        resources: list[SniffedResource] = []
        for item in results:
            # Skip malformed entries instead of fabricating empty-URL resources.
            if not isinstance(item, dict) or not item.get("url"):
                continue
            resources.append(
                SniffedResource(
                    url=item["url"],
                    resource_type=item.get("type", "unknown"),
                    size=item.get("size"),
                    duration=item.get("duration"),
                    resolution=item.get("resolution"),
                    title=item.get("title"),
                    thumbnail=item.get("thumbnail"),
                )
            )
        return resources

    @staticmethod
    def _deduplicate_resources(resources: list[SniffedResource]) -> list[SniffedResource]:
        """Removes duplicate resources from a list based on their URL.

        Args:
            resources (list[SniffedResource]): A list of sniffed resources.

        Returns:
            list[SniffedResource]: A new list containing only unique resources,
                keeping the first occurrence of each URL.
        """
        seen_urls: set[str] = set()
        unique: list[SniffedResource] = []

        for resource in resources:
            if resource.url not in seen_urls:
                seen_urls.add(resource.url)
                unique.append(resource)

        return unique

    def get_cached_resources(self, url: str) -> list[SniffedResource] | None:
        """Retrieves cached sniffed resources for a given URL.

        Args:
            url (str): The URL for which to retrieve cached resources.

        Returns:
            list[SniffedResource] | None: A list of sniffed resources if found
                in the cache, otherwise None.
        """
        return self._detected_resources.get(url)

    def clear_cache(self, url: str | None = None) -> None:
        """Clears the cache of detected resources.

        Args:
            url (str | None): The URL for which to clear the cache. If None,
                all cached resources are cleared.
        """
        if url:
            self._detected_resources.pop(url, None)
            logger.debug(f"已清除缓存: {url}")
        else:
            self._detected_resources.clear()
            logger.debug("已清除所有缓存")


async def sniff_url(url: str, headless: bool = True, wait_time: int = 5) -> list[SniffedResource]:
    """Sniff media resources from a URL using a temporary browser session.

    Args:
        url (str): The target URL to sniff.
        headless (bool): Whether to run the browser in headless mode.
        wait_time (int): Seconds to wait for late-loading resources.

    Returns:
        list[SniffedResource]: The resources discovered on the page.
    """
    async with BrowserController(headless=headless) as controller:
        sniffer = ResourceDetector(controller)
        return await sniffer.sniff_page(url, wait_time=wait_time)