from typing import List, Optional

from axiom_boot.di import service, primary, autowired
from axiom_boot.conf.manager import Settings
from axiom_boot.scraper.interfaces import Downloader
from axiom_boot.scraper.models import Target, Response
from axiom_boot.scraper.exceptions import DownloadError

import httpx


@primary
@service()
class HttpxDownloader(Downloader):
    """
    Default downloader implementation backed by the HTTPX library.

    Uses asynchronous requests for high throughput and is suitable for the
    vast majority of standard HTTP scraping tasks.
    """

    # Injected application settings; provides scraper.default_user_agent.
    _settings: Settings = autowired()
    # Lazily-created shared client — see the ``client`` property.
    _async_client: Optional[httpx.AsyncClient] = None

    # Transport defaults applied to every client this downloader creates.
    _DEFAULT_TIMEOUT: float = 30.0

    def __init__(self):
        # Dependencies arrive declaratively via ``autowired``; the DI
        # container only needs a no-arg constructor here.
        pass

    @property
    def client(self) -> httpx.AsyncClient:
        """
        Lazily create and reuse a shared ``httpx.AsyncClient`` instance to
        improve performance (connection pooling across requests).
        """
        if self._async_client is None:
            # More elaborate configuration (timeouts, proxies, ...) could be
            # read from settings here.
            self._async_client = self._build_client()
        return self._async_client

    def _build_client(self, proxy: Optional[str] = None) -> httpx.AsyncClient:
        """Create an ``AsyncClient`` with this downloader's standard options.

        NOTE(review): the ``proxy=`` keyword requires httpx >= 0.26 — confirm
        against the project's pinned httpx version.
        """
        return httpx.AsyncClient(
            http2=True,
            follow_redirects=True,
            timeout=self._DEFAULT_TIMEOUT,
            proxy=proxy,
        )

    async def download(self, target: Target) -> Response:
        """
        Execute the download for *target* using httpx.

        Raises:
            DownloadError: on network errors or non-2xx status codes.
        """
        # Merge request headers, giving precedence to those on the Target.
        headers = self._get_default_headers()
        if target.headers:
            headers.update(target.headers)

        request_kwargs = dict(
            method=target.method,
            url=target.url,
            headers=headers,
            cookies=target.cookies,
            data=target.data,
            json=target.json_data,
        )

        # Optional per-target proxy, read from the target's metadata.
        proxy = target.metadata.get("proxy")

        try:
            if proxy:
                # BUG FIX: httpx has no per-request ``proxies`` argument —
                # passing it to ``request()`` raised TypeError. Proxies must
                # be configured on the client, so proxied targets go through
                # a short-lived client configured with that proxy.
                async with self._build_client(proxy=proxy) as proxied_client:
                    http_response = await proxied_client.request(**request_kwargs)
            else:
                http_response = await self.client.request(**request_kwargs)
            http_response.raise_for_status()
        except httpx.RequestError as e:
            raise DownloadError(f"下载 {target.url} 时发生网络错误: {e}") from e
        except httpx.HTTPStatusError as e:
            raise DownloadError(f"下载 {target.url} 时收到非 2xx 状态码: {e.response.status_code}") from e

        return Response(
            url=str(http_response.url),
            status_code=http_response.status_code,
            headers=dict(http_response.headers),
            content=http_response.content,
            text=http_response.text,
            target=target
        )

    def _get_default_headers(self) -> dict:
        """Baseline headers applied to every request before Target overrides."""
        return {
            "User-Agent": self._settings.scraper.default_user_agent,
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
            "Accept-Language": "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
        }