from __future__ import annotations
import os
import hashlib
from typing import Optional, AsyncGenerator, List, Callable, Awaitable
from contextlib import asynccontextmanager

from playwright.async_api import async_playwright, Browser, Playwright, Page, Error as PlaywrightError, TimeoutError as PlaywrightTimeoutError

from axiom_boot.conf.manager import Settings
from axiom_boot.di import service, conditional_on_setting
from axiom_boot.logging.setup import get_logger
from axiom_boot.scraper.exceptions import ScraperException, DownloadError
from axiom_boot.scraper.interfaces import Downloader
from axiom_boot.scraper.models import Target, Response

logger = get_logger(__name__)

class PlaywrightDownloaderImpl(Downloader):
    """
    Concrete Playwright implementation of ``Downloader``.

    Renders ``target.url`` in a fresh, isolated browser context (one per call)
    using an externally managed ``Browser`` instance. Supports per-target
    User-Agent override, proxy, cookie injection, post-load page actions and a
    debug mode that captures a trace, a screenshot and the rendered HTML.
    """

    # Playwright only accepts these sameSite values; anything else is coerced to "Lax".
    _VALID_SAME_SITE = {"Lax", "Strict", "None"}

    def __init__(self, browser: Browser, settings: Settings):
        self._browser = browser
        self._settings = settings

    def _context_options(self, target: Target) -> dict:
        """Build ``new_context`` keyword options: user agent, viewport and optional proxy."""
        # Per-target "User-Agent" header overrides the configured default.
        user_agent = self._settings.scraper.default_user_agent
        if target.headers and "User-Agent" in target.headers:
            user_agent = target.headers["User-Agent"]

        options: dict = {
            "user_agent": user_agent,
            "viewport": {'width': 1920, 'height': 1080},
        }

        proxy_url = target.metadata.get("proxy")
        if proxy_url:
            from urllib.parse import urlparse
            parsed = urlparse(proxy_url)
            # Credentials are passed separately; the server URL must not embed them.
            options["proxy"] = {
                "server": f"{parsed.scheme}://{parsed.hostname}:{parsed.port}",
                "username": parsed.username,
                "password": parsed.password,
            }
        return options

    @classmethod
    def _sanitize_cookie(cls, cookie: dict) -> dict:
        """Return a shallow copy of *cookie* with a Playwright-valid ``sameSite`` value."""
        # Shallow copy: enough here since we only rewrite a top-level key,
        # and it avoids mutating the caller's data.
        sanitized = dict(cookie)
        if "sameSite" in sanitized:
            # Normalize case ("lax" -> "Lax"); str() guards against non-string
            # values exported by some browser extensions.
            normalized = str(sanitized["sameSite"]).capitalize()
            sanitized["sameSite"] = normalized if normalized in cls._VALID_SAME_SITE else "Lax"
        return sanitized

    async def _load_metadata_cookies(self, context, target: Target) -> None:
        """Inject cookies from ``target.metadata['cookies']`` into *context*.

        Two formats are accepted:
          1. list[dict] — the standard export format of EditThisCookie-style extensions;
          2. dict[str, str] — a simple name -> value mapping, scoped to ``target.url``.
        Unknown formats are logged and ignored (best-effort, no crash).
        """
        cookies = target.metadata.get("cookies")
        if not cookies:
            return
        logger.info(f"正在为上下文加载 {len(cookies)} 个 Cookies...")
        if isinstance(cookies, list) and all(isinstance(c, dict) for c in cookies):
            await context.add_cookies([self._sanitize_cookie(c) for c in cookies])
        elif isinstance(cookies, dict):
            await context.add_cookies(
                [{'name': k, 'value': v, 'url': target.url} for k, v in cookies.items()]
            )
        else:
            logger.warning(f"提供了未知的 Cookie 格式，已忽略。类型: {type(cookies)}")

    async def _dump_debug_artifacts(self, page: Page, target: Target, content: str) -> None:
        """Persist a full-page screenshot and the rendered HTML under ``storage/debug``."""
        debug_path = "storage/debug"
        os.makedirs(debug_path, exist_ok=True)
        # md5 is fine here: it is a filename key, not a security hash.
        url_hash = hashlib.md5(target.url.encode()).hexdigest()
        screenshot_path = os.path.join(debug_path, f"{url_hash}.png")
        html_path = os.path.join(debug_path, f"{url_hash}.html")
        await page.screenshot(path=screenshot_path, full_page=True)
        with open(html_path, "w", encoding="utf-8") as f:
            f.write(content)
        logger.info(f"页面快照已保存到 {screenshot_path} 和 {html_path}")

    async def download(self, target: Target) -> Response:
        """Render ``target.url`` and return a ``Response`` with the final page state.

        Raises:
            DownloadError: wraps any Playwright error or timeout for this target.
        """
        context = None
        page = None
        # Track whether tracing was actually started so cleanup never calls
        # tracing.stop() on a context whose tracing.start() was not reached.
        tracing_started = False
        try:
            context = await self._browser.new_context(**self._context_options(target))

            await self._load_metadata_cookies(context, target)

            # Cookies declared directly on the Target (simple name -> value map).
            if target.cookies:
                await context.add_cookies(
                    [{'name': k, 'value': v, 'url': target.url} for k, v in target.cookies.items()])

            # Tracing must start after the context exists.
            if target.metadata.get("debug"):
                logger.info("Debug 模式已开启，启动 Playwright 跟踪...")
                await context.tracing.start(screenshots=True, snapshots=True, sources=True)
                tracing_started = True

            page = await context.new_page()

            post_load_actions = target.metadata.get("post_load_actions")

            logger.info(f"正在导航到页面: {target.url}")
            nav_response = await page.goto(
                target.url,
                wait_until="domcontentloaded",
                timeout=self._settings.scraper.playwright_timeout,
            )

            if post_load_actions:
                logger.info(f"正在执行 {len(post_load_actions)} 个页面加载后操作...")
                for action in post_load_actions:
                    await action(page, target)

            content = await page.content()

            if target.metadata.get("debug"):
                await self._dump_debug_artifacts(page, target, content)

            # page.goto() may return None (e.g. navigation to about:blank or a
            # same-document anchor); fall back to the previous defaults then.
            return Response(
                url=page.url,
                text=content,
                content=content.encode('utf-8'),
                status_code=nav_response.status if nav_response else 200,
                headers=dict(nav_response.headers) if nav_response else {},
                cookies={c['name']: c['value'] for c in await context.cookies()},
                target=target
            )
        except (PlaywrightError, PlaywrightTimeoutError) as e:
            logger.error(f"Playwright 在处理 {target.url} 时发生错误: {e}")
            raise DownloadError(f"Playwright error on {target.url}") from e
        finally:
            # A failed trace export must never prevent page/context cleanup below.
            if tracing_started and context:
                trace_path = "storage/debug/trace.zip"
                logger.info(f"停止跟踪并保存到 {trace_path} ...")
                try:
                    await context.tracing.stop(path=trace_path)
                    logger.info("跟踪文件已保存。您可以使用 'npx playwright show-trace storage/debug/trace.zip' 命令来查看。")
                except PlaywrightError:
                    logger.exception("Failed to stop/export the Playwright trace.")

            if page:
                try:
                    await page.close()
                except PlaywrightError:
                    # Still attempt to close the context (which owns the page).
                    logger.warning("Failed to close the page cleanly.", exc_info=True)
            if context:
                await context.close()


@service(name="playwright")
@conditional_on_setting(key="scraper__playwright_enabled", expected_value=True)
async def create_playwright_downloader(settings: Settings) -> AsyncGenerator[Downloader, None]:
    """Async-generator factory owning the lifecycle of a ``PlaywrightDownloaderImpl``.

    Starts the Playwright driver, then either connects to an existing browser
    over CDP (``scraper.playwright_browser_endpoint``) or launches a fresh
    Chromium instance. Yields the downloader, and tears everything down when
    the DI container closes the generator.
    """
    playwright: Optional[Playwright] = None
    browser: Optional[Browser] = None
    try:
        logger.info("正在初始化 Playwright 下载器...")
        playwright = await async_playwright().start()

        endpoint = settings.scraper.playwright_browser_endpoint
        if endpoint:
            logger.info(f"Playwright 正在连接到已存在的浏览器: {endpoint}")
            browser = await playwright.chromium.connect_over_cdp(endpoint)
        else:
            logger.info(f"Playwright 正在启动一个新的浏览器实例 (headless={settings.scraper.playwright_headless})...")
            browser = await playwright.chromium.launch(headless=settings.scraper.playwright_headless)

        yield PlaywrightDownloaderImpl(browser, settings)
    finally:
        logger.info("正在关闭 Playwright 下载器并释放资源...")
        # Nested try/finally: even if closing the browser raises, the
        # Playwright driver process is still stopped (no orphaned driver).
        try:
            if browser and browser.is_connected():
                await browser.close()
                logger.info("Playwright 浏览器实例已关闭。")
        finally:
            if playwright:
                await playwright.stop()
                logger.info("Playwright 实例已停止。")