from fastapi import APIRouter, HTTPException, Depends, BackgroundTasks, Query
from typing import List, Optional
from app.models.article import ArticleCreate, ArticleUpdate, ArticleResponse, PaginatedArticles
from app.services.article_service import ArticleService
from app.services.ai_model_service import AIModelService
from fastapi.responses import StreamingResponse
import requests
from bs4 import BeautifulSoup
import re

router = APIRouter()

# Temporary stand-in until real authentication is wired up.
async def get_current_user():
    """Mock auth dependency: always reports an anonymous (logged-out) user."""
    return None

@router.post("/articles", response_model=ArticleResponse)
async def create_article(article: ArticleCreate):
    """Create a new article from the submitted payload."""
    service = ArticleService()
    created = await service.create_article(article)
    return created

@router.get("/articles", response_model=PaginatedArticles)
async def get_articles(
    page: int = Query(1, ge=1),
    limit: int = Query(10, ge=1, le=100),
    search: Optional[str] = None
):
    """List articles with pagination and an optional search filter.

    On any service failure the traceback is logged and the endpoint
    degrades to an empty page instead of surfacing a 500 to the client.
    """
    try:
        print(f"获取文章列表请求: page={page}, limit={limit}, search={search}")
        result = await ArticleService().get_articles(page, limit, search)
        print(f"获取文章列表成功: {result}")
        return result
    except Exception as e:
        import traceback
        print(f"获取文章列表出错: {str(e)}\n{traceback.format_exc()}")
        # Deliberate best-effort: hand back an empty page rather than raise.
        return {"items": [], "total": 0, "page": page, "limit": limit}

@router.get("/articles/{article_id}", response_model=ArticleResponse)
async def get_article(article_id: str):
    """Fetch a single article by id; 404 when it does not exist."""
    found = await ArticleService().get_article(article_id)
    if found:
        return found
    raise HTTPException(status_code=404, detail="Article not found")

@router.put("/articles/{article_id}", response_model=ArticleResponse)
async def update_article(article_id: str, article: ArticleUpdate):
    """Update an existing article; 404 when the id is unknown."""
    result = await ArticleService().update_article(article_id, article)
    if result:
        return result
    raise HTTPException(status_code=404, detail="Article not found")

@router.delete("/articles/{article_id}")
async def delete_article(article_id: str):
    """Delete an article; 404 when the id is unknown."""
    deleted = await ArticleService().delete_article(article_id)
    if not deleted:
        raise HTTPException(status_code=404, detail="文章不存在")
    return {"message": "文章已删除"}

@router.put("/articles/{article_id}/publish", response_model=ArticleResponse)
async def publish_article(article_id: str):
    """Mark an article as published; 404 when the id is unknown."""
    result = await ArticleService().publish_article(article_id)
    if result:
        return result
    raise HTTPException(status_code=404, detail="Article not found")

@router.post("/articles/generate-content")
async def generate_article_content(data: dict):
    """Generate article content in one shot with the selected AI model.

    Expects a JSON body with model_id (required), user_prompt (required)
    and an optional system_prompt.
    """
    model_id = data.get("model_id")
    system_prompt = data.get("system_prompt", "")
    user_prompt = data.get("user_prompt", "")

    if not (model_id and user_prompt):
        raise HTTPException(status_code=400, detail="缺少必要参数")

    try:
        text = await AIModelService().generate_text(model_id, system_prompt, user_prompt)
        return {"content": text}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"生成内容失败: {str(e)}")

@router.post("/articles/generate-content-stream")
async def generate_article_content_stream(data: dict):
    """Stream AI-generated article content as server-sent events.

    Same request body as generate_article_content, but the response is a
    text/event-stream produced incrementally by the model service.
    """
    model_id = data.get("model_id")
    system_prompt = data.get("system_prompt", "")
    user_prompt = data.get("user_prompt", "")

    if not (model_id and user_prompt):
        raise HTTPException(status_code=400, detail="缺少必要参数")

    try:
        stream = AIModelService().generate_text_stream(model_id, system_prompt, user_prompt)
        # NOTE: this try only guards stream *creation*; errors raised while
        # streaming occur after the response has already started.
        return StreamingResponse(stream, media_type="text/event-stream")
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"生成内容失败: {str(e)}")

@router.post("/articles/fetch-reference")
async def fetch_reference_content(data: dict):
    """Fetch and extract the content of a reference article by URL.

    Dispatches WeChat official-account links to the dedicated scraper and
    everything else to the generic web-page scraper. Raises 400 when the
    URL is missing and 500 when scraping fails.
    """
    url = data.get("url")

    if not url:
        raise HTTPException(status_code=400, detail="缺少URL参数")

    try:
        # WeChat articles need selector logic specific to mp.weixin.qq.com.
        if "mp.weixin.qq.com" in url:
            return await fetch_wechat_article(url)
        return await fetch_general_webpage(url)
    except HTTPException:
        # Bug fix: the helpers already raise HTTPException with a precise
        # detail; re-raise it unchanged instead of re-wrapping it into a
        # doubled "抓取内容失败: 500: ..." message.
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"抓取内容失败: {str(e)}")

async def fetch_wechat_article(url):
    """Scrape a WeChat official-account (mp.weixin.qq.com) article.

    Returns a dict with title, author, account, plain-text content and the
    original url. Raises HTTPException(500) when the page cannot be fetched
    or its body cannot be located.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # Bug fix: a timeout so an unresponsive host cannot hang the request
        # forever (requests has no default timeout).
        response = requests.get(url, headers=headers, timeout=15)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')

        # Bug fix: guard against a missing title node instead of crashing
        # with AttributeError on None (e.g. deleted or access-blocked post).
        title_tag = soup.find('h1', class_='rich_media_title')
        title = title_tag.get_text(strip=True) if title_tag else ""

        # Account (official-account name) and author are both optional.
        account = ""
        author = ""
        account_info = soup.find('a', class_='wx_tap_link')
        if account_info:
            account = account_info.get_text(strip=True)

        author_info = soup.find('span', class_='rich_media_meta_text')
        if author_info:
            author = author_info.get_text(strip=True)

        # Body container; without it there is nothing useful to return.
        content_div = soup.find('div', class_='rich_media_content')
        if content_div is None:
            raise ValueError("未找到文章正文内容")

        # Strip script/style tags so they don't leak into the text.
        for tag in content_div(["script", "style"]):
            tag.decompose()

        content = content_div.get_text(separator='\n', strip=True)

        # Collapse runs of 3+ newlines into a single blank line.
        content = re.sub(r'\n{3,}', '\n\n', content)

        return {
            "title": title,
            "author": author,
            "account": account,
            "content": content,
            "url": url
        }
    except Exception as e:
        print(f"抓取微信文章失败: {str(e)}")
        import traceback
        print(traceback.format_exc())
        raise HTTPException(status_code=500, detail=f"抓取微信文章失败: {str(e)}")

async def fetch_general_webpage(url):
    """Scrape the title and main text content of an arbitrary web page.

    Returns a dict with title, plain-text content and the original url.
    Raises HTTPException(500) when the page cannot be fetched or contains
    no extractable body.
    """
    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        # Bug fix: a timeout so an unresponsive host cannot hang the request
        # forever (requests has no default timeout).
        response = requests.get(url, headers=headers, timeout=15)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')

        title = soup.find('title').get_text(strip=True) if soup.find('title') else "未知标题"

        # Heuristic: prefer common main-content containers over the full body.
        main_content = None
        for selector in ['article', 'main', '.content', '#content', '.post', '.article']:
            main_content = soup.select_one(selector)
            if main_content:
                break

        if not main_content:
            main_content = soup.body

        # Bug fix: soup.body is None for non-HTML / badly broken documents;
        # the old code would crash calling None(...) below.
        if main_content is None:
            raise ValueError("页面没有可提取的正文内容")

        # Drop boilerplate tags so they don't leak into the text.
        for tag in main_content(["script", "style", "nav", "footer", "header", "aside"]):
            tag.decompose()

        content = main_content.get_text(separator='\n', strip=True)

        # Collapse runs of 3+ newlines into a single blank line.
        content = re.sub(r'\n{3,}', '\n\n', content)

        return {
            "title": title,
            "content": content,
            "url": url
        }
    except Exception as e:
        print(f"抓取网页失败: {str(e)}")
        import traceback
        print(traceback.format_exc())
        raise HTTPException(status_code=500, detail=f"抓取网页失败: {str(e)}")

@router.get("/drafts", response_model=List[ArticleResponse])
async def get_drafts(
    limit: int = 100, 
    offset: int = 0,
    current_user: dict = Depends(get_current_user)
):
    """Get the current user's list of draft articles."""
    # NOTE(review): get_current_user is a stub that always returns None,
    # so user_id is always None here — revisit once real auth lands.
    article_service = ArticleService()
    # NOTE(review): elsewhere in this file get_articles is called as
    # get_articles(page, limit, search); this keyword call (status=,
    # user_id=, offset=) implies a different signature — confirm that
    # ArticleService.get_articles actually accepts these kwargs.
    articles = await article_service.get_articles(
        status="draft", 
        user_id=current_user["id"] if current_user else None,
        limit=limit,
        offset=offset
    )
    return articles 