from fastapi import FastAPI, HTTPException, BackgroundTasks
from pydantic import BaseModel
import asyncio
from playwright.async_api import async_playwright
import re
import json
import time
from typing import Optional, Dict, Any

# FastAPI application instance; title/description are shown in the
# auto-generated OpenAPI docs (/docs).
app = FastAPI(
    title="微信公众号文章解析器",
    description="一个用于解析微信公众号文章内容的MCP工具",
    version="1.0.0"
)

# MCP tool metadata, served verbatim by GET /metadata so MCP clients can
# discover this tool: its name, description, and a JSON-schema description
# of the /process parameters (url required, timeout optional in ms).
MCP_METADATA = {
    "name": "微信公众号文章解析器",
    "description": "使用Playwright模拟浏览器访问，解析微信公众号文章内容",
    "parameters": {
        "type": "object",
        "properties": {
            "url": {
                "type": "string",
                "description": "微信公众号文章URL"
            },
            "timeout": {
                "type": "integer",
                "description": "页面加载超时时间（毫秒）",
                "default": 30000
            }
        },
        "required": ["url"]
    }
}

# Request body model for POST /process.
class ArticleRequest(BaseModel):
    """Input: the article URL to parse and an optional page-load timeout."""
    url: str  # WeChat article URL; must start with https://mp.weixin.qq.com (validated in process_article)
    timeout: Optional[int] = 30000  # page-load timeout in milliseconds (passed to page.goto)

# Response body model for POST /process.
class ArticleResponse(BaseModel):
    """Parsed article fields; `success`/`error` report scraping outcome in-band."""
    title: str  # article title ("" when parsing failed)
    content: str  # plain-text article body ("" when parsing failed)
    summary: Optional[str] = None  # og:description meta tag, or a prefix of the content
    author: Optional[str] = None  # account nickname, when present on the page
    publish_time: Optional[str] = None  # publish-time text, when present on the page
    cover_image: Optional[str] = None  # og:image meta tag URL
    images: Optional[list] = None  # image URLs found inside the article body, or None if none
    success: bool  # True when scraping completed without an exception
    error: Optional[str] = None  # stringified exception when success is False

@app.get("/metadata")
async def get_metadata():
    """Return this tool's MCP metadata (name, description, parameter schema)."""
    metadata = MCP_METADATA
    return metadata

async def extract_article_content(page):
    """Extract article fields from a loaded WeChat Official Account page.

    Args:
        page: Playwright async Page already navigated to the article URL.

    Returns:
        dict with keys: title, content (plain text, paragraph breaks kept as
        newlines), summary, author, publish_time, cover_image, images
        (list of URLs, or None when the article has none).
    """
    # Wait briefly for the article body to render; if the selector never
    # appears (timeout or alternative page layout), fall through and try
    # extraction anyway — the content may already be in the DOM.
    try:
        await page.wait_for_selector(".rich_media_content", timeout=5000)
    except Exception:
        pass

    async def _text(selector):
        # First match's text content, or None when the selector misses.
        el = await page.query_selector(selector)
        return await el.text_content() if el else None

    async def _attr(selector, name):
        # First match's attribute value, or None when the selector misses.
        el = await page.query_selector(selector)
        return await el.get_attribute(name) if el else None

    # Title: guard against both a missing element and a None text_content()
    # (the original could crash calling .strip() on None).
    title = (await _text("#activity-name") or "未知标题").strip()
    author = await _text(".rich_media_meta_nickname")
    publish_time = await _text(".rich_media_meta_text")
    cover_image = await _attr("meta[property='og:image']", "content")

    # Article body HTML.
    content_element = await page.query_selector(".rich_media_content")
    content_html = await content_element.inner_html() if content_element else ""

    # Strip HTML down to plain text while actually preserving paragraph
    # separation: convert <br> and closing block tags to newlines BEFORE
    # removing tags (the previous implementation collapsed all whitespace,
    # losing every paragraph break despite its comment claiming otherwise).
    content = re.sub(r'(?i)<\s*br\s*/?\s*>|</\s*(?:p|div|section|li|h[1-6])\s*>', '\n', content_html)
    content = re.sub(r'<[^>]*>', '', content)
    content = re.sub(r'[ \t\u00a0]+', ' ', content)      # collapse horizontal whitespace
    content = re.sub(r'\s*\n\s*', '\n', content).strip()  # tidy blank lines around breaks

    # Collect image URLs; WeChat lazy-loads images via data-src, so prefer
    # it and fall back to src.
    images = []
    for img in await page.query_selector_all(".rich_media_content img"):
        img_url = await img.get_attribute("data-src") or await img.get_attribute("src")
        if img_url:
            images.append(img_url)

    # Summary: prefer the og:description meta tag; otherwise take roughly
    # the first 200 characters of the body text.
    summary = await _attr("meta[property='og:description']", "content")
    if not summary and content:
        if len(content) > 200:
            # Cut at a space near position 200 to avoid splitting a word;
            # for CJK text (no spaces) fall back to a hard cut at 200.
            cut = content.rfind(' ', 0, 200)
            if cut == -1:
                cut = 200
            summary = content[:cut].strip() + "..."
        else:
            summary = content.strip()

    return {
        "title": title,
        "content": content,
        "summary": summary,
        "author": author,
        "publish_time": publish_time,
        "cover_image": cover_image,
        "images": images or None
    }

@app.post("/process", response_model=ArticleResponse)
async def process_article(request: ArticleRequest):
    """Fetch and parse a WeChat Official Account article.

    Returns an ArticleResponse with success=True on success. Scraping
    failures are reported in-band (success=False, error set). An invalid
    URL yields a real HTTP 400.
    """
    # Validate the URL OUTSIDE the try-block: previously the HTTPException
    # raised here was swallowed by the blanket `except Exception` below and
    # returned as a 200 with success=False instead of a 400.
    if not request.url.startswith("https://mp.weixin.qq.com"):
        raise HTTPException(status_code=400, detail="请提供有效的微信公众号文章URL")

    try:
        async with async_playwright() as playwright:
            # Headless Chromium with flags suitable for containerized runs.
            browser = await playwright.chromium.launch(
                headless=True,
                args=[
                    '--no-sandbox',
                    '--disable-setuid-sandbox',
                    '--disable-dev-shm-usage'
                ]
            )
            try:
                # Desktop user agent; some pages serve degraded markup to
                # unknown/headless agents.
                context = await browser.new_context(
                    user_agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36",
                    viewport={"width": 1280, "height": 720}
                )
                page = await context.new_page()

                # Abort static-asset requests to speed up loading. The
                # previous handler called route.continue_(), which still
                # loaded every resource, defeating the stated purpose.
                await page.route(
                    "**/*.{png,jpg,jpeg,gif,svg,css,js}",
                    lambda route: route.abort()
                )

                # domcontentloaded is enough: the article body is in the
                # initial HTML; waiting for full load would be slower.
                await page.goto(request.url, timeout=request.timeout, wait_until="domcontentloaded")

                # Give late-rendered fragments a moment to settle.
                await asyncio.sleep(2)

                article_data = await extract_article_content(page)
            finally:
                # Always release the browser, even when extraction raises.
                await browser.close()

        return ArticleResponse(
            **article_data,
            success=True
        )
    except Exception as e:
        # Scraping errors are returned in-band rather than as HTTP errors.
        return ArticleResponse(
            title="",
            content="",
            success=False,
            error=str(e)
        )

@app.get("/")
async def root():
    """Landing endpoint: basic tool information and a usage hint."""
    info = {
        "message": "微信公众号文章解析器MCP工具",
        "version": "1.0.0",
        "usage": "请通过POST请求访问 /process 端点，并提供微信公众号文章URL"
    }
    return info

if __name__ == "__main__":
    # Launched directly: serve the app on all interfaces, port 8000.
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)