"""网页内容抓取 MCP 服务器

提供网页内容抓取的 MCP 工具接口，独立于现有的大鱼号和小红书发布功能。

启动方式:
    python -m xhs_mcp_server.scraper_server

环境变量配置:
    SCRAPER_HEADLESS          : 无头模式运行 (true/false)
    SCRAPER_USER_DATA_DIR     : Chrome 用户数据目录
    SCRAPER_WAIT_TIMEOUT      : 页面等待超时时间（秒）
    SCRAPER_DOWNLOAD_IMAGES   : 是否下载图片到本地 (true/false)
    SCRAPER_IMAGE_DIR         : 图片下载目录
    SCRAPER_MAX_IMAGES        : 最大图片数量
    SCRAPER_MIN_IMAGE_SIZE    : 最小图片尺寸过滤（像素）

工具接口:
    - scrape_webpage         : 抓取单个网页内容
    - scrape_webpage_async   : 异步抓取单个网页
    - scrape_webpage_wait    : 抓取并等待完成
    - scrape_multiple_urls   : 批量抓取多个网页
    - get_scrape_job_status  : 查询抓取任务状态
"""
from __future__ import annotations

import threading
import uuid
import traceback
import time
import os
import json
from typing import List, Optional

from mcp.server import FastMCP
from mcp.types import TextContent

try:
    from .scraper import WebScraper, ScraperConfig, ScrapedContent
except ImportError:
    # 如果作为独立模块运行
    import sys
    import os
    sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
    from scraper import WebScraper, ScraperConfig, ScrapedContent

# FastMCP server instance; the tool functions below register themselves on it
# via the @mcp.tool() decorator.
mcp = FastMCP("webpage-scraper")

# Async job management (in-memory store).
# Maps job_id -> {"status": "pending"|"running"|"done"|"error",
#                 "result": serialized result or None,
#                 "error": traceback text or None}
# NOTE(review): accessed from worker threads without a lock; individual dict
# operations are atomic under the GIL, but read-modify-write in
# _set_scrape_job is not — confirm acceptable for this workload.
SCRAPE_JOBS: dict[str, dict] = {}

def _set_scrape_job(job_id: str, **kwargs):
    """Merge *kwargs* into the stored state for *job_id*, creating the record if absent."""
    SCRAPE_JOBS.setdefault(job_id, {}).update(kwargs)

def _serialize_scraped(item: ScrapedContent, content_limit: int,
                       max_images: Optional[int] = None,
                       max_tags: Optional[int] = None) -> dict:
    """Convert a ScrapedContent into a JSON-serializable dict.

    Args:
        item: Scrape result to serialize.
        content_limit: Maximum number of characters of body text to keep.
        max_images: If given, keep at most this many image entries.
        max_tags: If given, keep at most this many tags.

    Returns:
        A plain dict safe to pass to json.dumps.
    """
    return {
        "url": item.url,
        "title": item.title,
        "content": item.content[:content_limit],
        "content_length": len(item.content),
        "images": item.images if max_images is None else item.images[:max_images],
        "image_count": len(item.images),
        "meta_description": item.meta_description,
        "author": item.author,
        "publish_date": item.publish_date,
        "tags": item.tags if max_tags is None else item.tags[:max_tags],
        "error": item.error,
    }

def _start_scrape_job(target, *args, **kwargs) -> str:
    """Run target(*args, **kwargs) on a daemon thread, tracked in SCRAPE_JOBS.

    Args:
        target: Callable performing the scrape; may return a ScrapedContent,
            a list of them, or any JSON-serializable value.
        *args, **kwargs: Forwarded to *target*.

    Returns:
        The new job's id (uuid4 string), usable with get_scrape_job_status.
    """
    job_id = str(uuid.uuid4())
    _set_scrape_job(job_id, status="pending", result=None, error=None)

    def runner():
        _set_scrape_job(job_id, status="running")
        try:
            result = target(*args, **kwargs)
            if isinstance(result, ScrapedContent):
                # Single-page scrape: keep up to 2000 chars of body text.
                result_dict = _serialize_scraped(result, content_limit=2000)
            elif isinstance(result, list):
                # Batch scrape: tighter per-item limits to keep the payload small.
                # BUGFIX: non-ScrapedContent entries were previously dropped
                # silently; now they are passed through unchanged.
                result_dict = [
                    _serialize_scraped(item, content_limit=1000,
                                       max_images=5, max_tags=5)
                    if isinstance(item, ScrapedContent) else item
                    for item in result
                ]
            else:
                result_dict = result

            _set_scrape_job(job_id, status="done", result=result_dict, error=None)
        except Exception:
            # Store the full traceback so status queries can surface it.
            _set_scrape_job(job_id, status="error", error=traceback.format_exc(), result=None)

    thread = threading.Thread(target=runner, daemon=True)
    thread.start()
    return job_id

def _wait_for_scrape_job(job_id: str, timeout_sec: int) -> tuple[bool, str]:
    """等待抓取任务完成"""
    start = time.time()
    while time.time() - start < timeout_sec:
        job = SCRAPE_JOBS.get(job_id)
        if job:
            status = job.get("status")
            if status == "done":
                result = job.get("result")
                return True, json.dumps(result, ensure_ascii=False, indent=2)
            if status == "error":
                return True, f"抓取失败: {job.get('error')}"
        time.sleep(1)
    return False, f"任务超时: {job_id}"

# -------------------- MCP tool definitions --------------------

@mcp.tool()
def ping_scraper(message: str = "ping") -> list[TextContent]:
    """Health check: echo *message* back to confirm the scraper service responds."""
    reply = f"pong from scraper: {message}"
    return [TextContent(type="text", text=reply)]

@mcp.tool()
def scrape_webpage(
    url: str,
    download_images: bool = False,
    image_dir: Optional[str] = None,
    headless: bool = True,
    wait_timeout: int = 15
) -> list[TextContent]:
    """Start an asynchronous scrape of a single web page.

    Args:
        url: Web page URL to scrape.
        download_images: Whether to download images locally.
        image_dir: Image download directory (a temp dir is used if omitted).
        headless: Whether to run the browser headless.
        wait_timeout: Page-load timeout in seconds.

    Returns:
        A text payload containing the job_id, for polling with
        get_scrape_job_status.
    """
    def _run():
        cfg = ScraperConfig()
        cfg.headless = headless
        cfg.wait_timeout = wait_timeout
        cfg.download_images = download_images
        if image_dir:
            cfg.image_download_dir = image_dir
        with WebScraper(cfg) as scraper:
            return scraper.scrape_url(url)

    job_id = _start_scrape_job(_run)
    return [TextContent(type="text", text=f"job_id={job_id}")]

@mcp.tool()
def scrape_webpage_async(
    url: str,
    download_images: bool = False,
    image_dir: Optional[str] = None,
    headless: bool = True,
    wait_timeout: int = 15
) -> list[TextContent]:
    """Alias for scrape_webpage: start an async scrape and return its job_id."""
    return scrape_webpage(
        url,
        download_images=download_images,
        image_dir=image_dir,
        headless=headless,
        wait_timeout=wait_timeout,
    )

@mcp.tool()
def scrape_webpage_wait(
    url: str,
    download_images: bool = False,
    image_dir: Optional[str] = None,
    headless: bool = True,
    wait_timeout: int = 15,
    job_timeout: int = 60
) -> list[TextContent]:
    """Scrape a single web page and block until the job completes or times out.

    Args:
        url: Web page URL to scrape.
        download_images: Whether to download images locally.
        image_dir: Image download directory.
        headless: Whether to run the browser headless.
        wait_timeout: Page-load timeout in seconds.
        job_timeout: Overall job timeout in seconds.

    Returns:
        The scrape result as JSON text, or a timeout message with the job_id
        so the caller can keep polling.
    """
    # Kick off the async job, then recover the job_id from the response text.
    started = scrape_webpage(url, download_images, image_dir, headless, wait_timeout)
    job_id = started[0].text.split("job_id=")[-1]

    finished, result = _wait_for_scrape_job(job_id, job_timeout)
    text = result if finished else f"等待超时: {result}; job_id={job_id}"
    return [TextContent(type="text", text=text)]

@mcp.tool()
def scrape_multiple_urls(
    urls: List[str],
    download_images: bool = False,
    image_dir: Optional[str] = None,
    headless: bool = True,
    wait_timeout: int = 15
) -> list[TextContent]:
    """Start an asynchronous batch scrape of several web pages.

    Args:
        urls: Web page URLs to scrape.
        download_images: Whether to download images locally.
        image_dir: Image download directory.
        headless: Whether to run the browser headless.
        wait_timeout: Page-load timeout in seconds.

    Returns:
        A text payload containing the job_id, for polling with
        get_scrape_job_status.
    """
    def _run_batch():
        cfg = ScraperConfig()
        cfg.headless = headless
        cfg.wait_timeout = wait_timeout
        cfg.download_images = download_images
        if image_dir:
            cfg.image_download_dir = image_dir
        with WebScraper(cfg) as scraper:
            return scraper.scrape_multiple_urls(urls)

    job_id = _start_scrape_job(_run_batch)
    return [TextContent(type="text", text=f"job_id={job_id} (批量抓取 {len(urls)} 个URL)")]

@mcp.tool()
def scrape_multiple_urls_wait(
    urls: List[str],
    download_images: bool = False,
    image_dir: Optional[str] = None,
    headless: bool = True,
    wait_timeout: int = 15,
    job_timeout: int = 300
) -> list[TextContent]:
    """Batch-scrape several web pages and block until the job completes.

    Args:
        urls: Web page URLs to scrape.
        download_images: Whether to download images locally.
        image_dir: Image download directory.
        headless: Whether to run the browser headless.
        wait_timeout: Page-load timeout in seconds.
        job_timeout: Overall job timeout in seconds.

    Returns:
        The batch result as JSON text, or a timeout message with the job_id.
    """
    # Start the batch job; the job_id precedes a " (批量抓取 ...)" suffix.
    started = scrape_multiple_urls(urls, download_images, image_dir, headless, wait_timeout)
    job_id = started[0].text.split("job_id=")[-1].split(" ")[0]

    finished, result = _wait_for_scrape_job(job_id, job_timeout)
    if not finished:
        return [TextContent(type="text", text=f"等待超时: {result}; job_id={job_id}")]
    return [TextContent(type="text", text=result)]

@mcp.tool()
def get_scrape_job_status(job_id: str) -> list[TextContent]:
    """Look up the current state of a scrape job.

    Args:
        job_id: Id returned when the job was started.

    Returns:
        The JSON result for finished jobs, the error text for failed jobs,
        or a plain status line otherwise.
    """
    job = SCRAPE_JOBS.get(job_id)
    if not job:
        return [TextContent(type="text", text=f"任务不存在: {job_id}")]

    status = job.get("status", "unknown")
    if status == "done":
        payload = json.dumps(job.get("result"), ensure_ascii=False, indent=2)
        return [TextContent(type="text", text=payload)]
    if status == "error":
        return [TextContent(type="text", text=f"任务失败: {job.get('error', '未知错误')}")]
    return [TextContent(type="text", text=f"任务状态: {status}")]

@mcp.tool()
def list_scrape_jobs() -> list[TextContent]:
    """List every tracked scrape job with a one-line status summary.

    Returns:
        One line per job: "<job_id>: <status>", followed by a short hint —
        a truncated title for single-page results, a count for batch results,
        or the first 100 chars of the error for failed jobs.
    """
    if not SCRAPE_JOBS:
        return [TextContent(type="text", text="没有抓取任务")]

    job_list = []
    for job_id, job in SCRAPE_JOBS.items():
        status = job.get("status", "unknown")
        job_info = f"{job_id}: {status}"
        if status == "done":
            result = job.get("result")
            if isinstance(result, dict):
                # BUGFIX: result["title"] can be present with value None, in
                # which case .get's default never applies and None[:50] raised
                # TypeError; coerce falsy titles to "N/A" before slicing.
                job_info += f" - {(result.get('title') or 'N/A')[:50]}"
            elif isinstance(result, list):
                job_info += f" - 批量任务 ({len(result)} 条结果)"
        elif status == "error":
            # Defensive: treat a missing/None error the same as empty text.
            error = (job.get("error") or "")[:100]
            job_info += f" - 错误: {error}"
        job_list.append(job_info)

    return [TextContent(type="text", text="\n".join(job_list))]

@mcp.tool()
def clear_scrape_jobs() -> list[TextContent]:
    """Drop every stored scrape-job record and report how many were removed."""
    removed = len(SCRAPE_JOBS)
    SCRAPE_JOBS.clear()
    return [TextContent(type="text", text=f"已清除 {removed} 个任务记录")]

def main():
    """Entry point: start the webpage-scraper MCP server (blocks until shutdown)."""
    mcp.run()

if __name__ == "__main__":
    main()