from fastapi import APIRouter, Depends, HTTPException, UploadFile, File, Form
from fastapi.responses import FileResponse
from sqlalchemy.orm import Session
from sqlalchemy import or_, and_
from app.api.deps import get_db, get_current_user
from app.models.user import User
from app.models.document import Document, DocumentStatus
from app.schemas.document import (
    DocumentCreate,
    DocumentUpdate,
    DocumentResponse,
    DocumentListResponse
)
from typing import Optional, List
import logging
import uuid
import hashlib
import json
import os
from datetime import datetime
from app.core.config import settings
from app.services.ai_classifier import AIClassifier
from app.middleware.input_validation import InputValidator
from app.dependencies.permissions import check_document_permission
from app.utils.db_utils import get_db_transaction, safe_commit

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/documents", tags=["documents"])


def calculate_file_hash(file_content: bytes) -> str:
    """计算文件MD5哈希值"""
    return hashlib.md5(file_content).hexdigest()


@router.post("/upload", response_model=DocumentResponse)
async def upload_document(
    file: UploadFile = File(...),
    title: Optional[str] = Form(None),
    category_id: Optional[str] = Form(None),
    knowledge_base_id: Optional[str] = Form(None),
    tags: Optional[str] = Form(None),  # JSON array string
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Upload a document.

    Args:
        file: The uploaded file
        title: Document title (optional; defaults to the original file name)
        category_id: Category ID
        knowledge_base_id: Knowledge base ID
        tags: Tags as a JSON array string
        current_user: Current user
        db: Database session

    Returns:
        DocumentResponse: The newly created document

    Raises:
        HTTPException: 400 on size/type/duplicate violations, 500 on unexpected errors
    """
    saved_path = None  # set once the file is on disk, so the except path can clean up
    try:
        # Read the entire upload into memory (size is bounded below)
        file_content = await file.read()
        file_size = len(file_content)

        # Validate file size
        if file_size > settings.MAX_FILE_SIZE:
            raise HTTPException(
                status_code=400,
                detail=f"文件大小超过限制（最大{settings.MAX_FILE_SIZE / 1024 / 1024}MB）"
            )

        # Validate file type. file.filename can be None depending on the client,
        # so guard before splitting to avoid an AttributeError -> 500.
        original_name = file.filename or ""
        file_extension = original_name.split('.')[-1].lower() if '.' in original_name else ''
        if file_extension not in settings.ALLOWED_FILE_TYPES:
            raise HTTPException(
                status_code=400,
                detail=f"不支持的文件类型。支持的类型：{', '.join(settings.ALLOWED_FILE_TYPES)}"
            )

        # Content hash for duplicate detection
        file_hash = calculate_file_hash(file_content)

        # Duplicate check: same user + same knowledge base counts as a duplicate
        if knowledge_base_id:
            existing_doc = db.query(Document).filter(
                and_(
                    Document.file_hash == file_hash,
                    Document.upload_user_id == str(current_user.id),
                    Document.knowledge_base_id == knowledge_base_id
                )
            ).first()

            if existing_doc:
                raise HTTPException(
                    status_code=400,
                    detail=f"该文件已在此知识库中：{existing_doc.title}"
                )
        else:
            # No knowledge base given: reject if the user uploaded this file anywhere
            existing_doc = db.query(Document).filter(
                and_(
                    Document.file_hash == file_hash,
                    Document.upload_user_id == str(current_user.id)
                )
            ).first()

            if existing_doc:
                raise HTTPException(
                    status_code=400,
                    detail=f"文件已存在：{existing_doc.title}（已在其他位置上传）"
                )

        # Ensure the per-user storage directory exists
        user_dir = os.path.join(settings.USER_DOCS_DIR, str(current_user.id))
        os.makedirs(user_dir, exist_ok=True)

        # Unique on-disk name keyed by the document ID (never the client filename)
        doc_id = str(uuid.uuid4())
        file_path = os.path.join(user_dir, f"{doc_id}.{file_extension}")

        # Persist the file
        with open(file_path, "wb") as f:
            f.write(file_content)
        saved_path = file_path

        # Parse tags (best effort: malformed JSON just yields no tags)
        tag_list = []
        if tags:
            try:
                tag_list = json.loads(tags)
            except json.JSONDecodeError:
                logger.warning(f"无法解析标签JSON: {tags}")

        # Create the database record
        document = Document(
            id=doc_id,
            title=title or original_name,
            file_name=original_name,
            file_path=file_path,
            file_type=file.content_type or f"application/{file_extension}",
            file_size=file_size,
            file_hash=file_hash,
            category_id=category_id,
            knowledge_base_id=knowledge_base_id,
            tags=json.dumps(tag_list, ensure_ascii=False),
            upload_user_id=str(current_user.id),
            upload_user_name=current_user.username,
            status=DocumentStatus.COMPLETED,
            upload_time=datetime.utcnow(),
            is_vectorized=False
        )

        db.add(document)
        db.commit()
        db.refresh(document)

        logger.info(f"用户 {current_user.username} 上传文档: {document.title} ({file_size} bytes)")

        # Serialize manually so tags go out as a parsed list, not a JSON string
        return {
            "id": document.id,
            "title": document.title,
            "file_name": document.file_name,
            "file_type": document.file_type,
            "file_size": document.file_size,
            "status": document.status,
            "upload_user_name": document.upload_user_name,
            "upload_time": document.upload_time,
            "updated_at": document.updated_at,
            "page_count": document.page_count,
            "word_count": document.word_count,
            "summary": document.summary,
            "is_vectorized": document.is_vectorized,
            "category_id": document.category_id,
            "knowledge_base_id": document.knowledge_base_id,
            "tags": tag_list
        }

    except HTTPException:
        raise
    except Exception as e:
        logger.error(f"上传文档失败: {str(e)}")
        db.rollback()
        # Don't leave an orphaned file on disk when the DB write failed
        if saved_path and os.path.exists(saved_path):
            try:
                os.remove(saved_path)
            except OSError:
                logger.warning(f"清理文件失败: {saved_path}")
        raise HTTPException(status_code=500, detail=f"上传失败: {str(e)}")


@router.get("/list", response_model=DocumentListResponse)
async def list_documents(
    page: int = 1,
    page_size: int = 20,
    search: Optional[str] = None,
    category_id: Optional[str] = None,
    knowledge_base_id: Optional[str] = None,
    status: Optional[DocumentStatus] = None,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    List the current user's documents (paginated).

    Args:
        page: Page number (1-based; values below 1 are clamped to 1)
        page_size: Items per page (values below 1 are clamped to 1)
        search: Keyword matched against title, file name, or summary
        category_id: Filter by category ID
        knowledge_base_id: Filter by knowledge base ID
        status: Filter by document status
        current_user: Current user
        db: Database session

    Returns:
        DocumentListResponse: Total count plus the page of documents
    """
    try:
        # Clamp pagination inputs: page=0 or negatives would produce a negative
        # OFFSET/LIMIT and either fail or return surprising results.
        page = max(page, 1)
        page_size = max(page_size, 1)

        # Base query: only the caller's own documents
        query = db.query(Document).filter(Document.upload_user_id == str(current_user.id))

        # Keyword filter across title, file name, and summary
        if search:
            query = query.filter(
                or_(
                    Document.title.ilike(f"%{search}%"),
                    Document.file_name.ilike(f"%{search}%"),
                    Document.summary.ilike(f"%{search}%")
                )
            )

        # Category filter
        if category_id:
            query = query.filter(Document.category_id == category_id)

        # Knowledge base filter
        if knowledge_base_id:
            query = query.filter(Document.knowledge_base_id == knowledge_base_id)

        # Status filter
        if status:
            query = query.filter(Document.status == status)

        # Total before pagination
        total = query.count()

        # Page slice, newest uploads first
        skip = (page - 1) * page_size
        documents = query.order_by(Document.upload_time.desc()).offset(skip).limit(page_size).all()

        # Serialize each document; tags are stored as a JSON string and a single
        # corrupt value must not 500 the entire listing.
        items = []
        for doc in documents:
            try:
                tag_list = json.loads(doc.tags) if doc.tags else []
            except json.JSONDecodeError:
                logger.warning(f"无法解析标签JSON: {doc.tags}")
                tag_list = []
            items.append({
                "id": doc.id,
                "title": doc.title,
                "file_name": doc.file_name,
                "file_type": doc.file_type,
                "file_size": doc.file_size,
                "status": doc.status,
                "upload_user_name": doc.upload_user_name,
                "upload_time": doc.upload_time,
                "updated_at": doc.updated_at,
                "page_count": doc.page_count,
                "word_count": doc.word_count,
                "summary": doc.summary,
                "is_vectorized": doc.is_vectorized,
                "category_id": doc.category_id,
                "knowledge_base_id": doc.knowledge_base_id,
                "tags": tag_list
            })

        return {
            "total": total,
            "items": items,
            "page": page,
            "page_size": page_size
        }

    except Exception as e:
        logger.error(f"获取文档列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取文档列表失败: {str(e)}")


@router.get("/{document_id}", response_model=DocumentResponse)
async def get_document(
    document_id: str,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Fetch a single document owned by the current user.

    Args:
        document_id: Document ID
        current_user: Current user (query is scoped to their uploads)
        db: Database session

    Returns:
        DocumentResponse: The document, with tags parsed into a list

    Raises:
        HTTPException: 404 when no matching document is owned by the caller
    """
    document = db.query(Document).filter(
        and_(
            Document.id == document_id,
            Document.upload_user_id == str(current_user.id)
        )
    ).first()

    if document is None:
        raise HTTPException(status_code=404, detail="文档不存在")

    # Copy the plain columns straight off the ORM object
    field_names = (
        "id", "title", "file_name", "file_type", "file_size", "status",
        "upload_user_name", "upload_time", "updated_at", "page_count",
        "word_count", "summary", "is_vectorized", "category_id",
        "knowledge_base_id",
    )
    payload = {name: getattr(document, name) for name in field_names}

    # Tags are persisted as a JSON string; expose them as a list
    payload["tags"] = json.loads(document.tags) if document.tags else []

    return payload


@router.put("/{document_id}", response_model=DocumentResponse)
async def update_document(
    document_id: str,
    doc_update: DocumentUpdate,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Update a document's editable metadata.

    Only the fields supplied (non-None) in the request body are applied.

    Args:
        document_id: Document ID
        doc_update: Partial update payload
        current_user: Current user (query is scoped to their uploads)
        db: Database session

    Returns:
        DocumentResponse: The updated document, with tags parsed into a list

    Raises:
        HTTPException: 404 when the document isn't found, 500 on commit failure
    """
    document = db.query(Document).filter(
        and_(
            Document.id == document_id,
            Document.upload_user_id == str(current_user.id)
        )
    ).first()

    if document is None:
        raise HTTPException(status_code=404, detail="文档不存在")

    try:
        # Apply the simple scalar fields the caller actually supplied
        for attr in ("title", "category_id", "knowledge_base_id", "summary"):
            value = getattr(doc_update, attr)
            if value is not None:
                setattr(document, attr, value)

        # Tags are persisted as a JSON string
        if doc_update.tags is not None:
            document.tags = json.dumps(doc_update.tags, ensure_ascii=False)

        document.updated_at = datetime.utcnow()

        db.commit()
        db.refresh(document)

        logger.info(f"用户 {current_user.username} 更新文档: {document.title}")

        # Serialize with tags parsed back into a list
        field_names = (
            "id", "title", "file_name", "file_type", "file_size", "status",
            "upload_user_name", "upload_time", "updated_at", "page_count",
            "word_count", "summary", "is_vectorized", "category_id",
            "knowledge_base_id",
        )
        payload = {name: getattr(document, name) for name in field_names}
        payload["tags"] = json.loads(document.tags) if document.tags else []
        return payload

    except Exception as e:
        logger.error(f"更新文档失败: {str(e)}")
        db.rollback()
        raise HTTPException(status_code=500, detail=f"更新失败: {str(e)}")


@router.post("/batch-vectorize")
async def batch_vectorize_documents(
    document_ids: list[str],
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Batch-vectorize documents (batch size capped to limit load).

    Args:
        document_ids: Document IDs to vectorize (max 5 per request)
        current_user: Current user (only their own documents are eligible)
        db: Database session

    Returns:
        dict: Success/failure counts plus per-document failure reasons

    Raises:
        HTTPException: 400 on an empty or oversized batch
    """
    if not document_ids:
        raise HTTPException(status_code=400, detail="请选择要向量化的文档")

    # Cap the batch to avoid long-running requests
    MAX_BATCH_SIZE = 5
    if len(document_ids) > MAX_BATCH_SIZE:
        raise HTTPException(
            status_code=400, 
            detail=f"单次最多向量化 {MAX_BATCH_SIZE} 个文档，请分批处理"
        )

    # Build the services once; the original re-imported and re-instantiated
    # them for every document inside the loop.
    from app.services.document_processor import DocumentProcessor
    from app.services.ollama_service import OllamaService

    ollama_service = OllamaService()
    doc_processor = DocumentProcessor(ollama_service)

    success_count = 0
    failed_count = 0
    failed_details = []

    for document_id in document_ids:
        try:
            # Scoped to the uploader: acts as the permission check
            document = db.query(Document).filter(
                and_(
                    Document.id == document_id,
                    Document.upload_user_id == str(current_user.id)
                )
            ).first()

            if not document:
                failed_count += 1
                failed_details.append({"id": document_id, "reason": "文档不存在或无权限"})
                continue

            # Skip documents that were already vectorized
            if document.is_vectorized:
                failed_count += 1
                failed_details.append({"id": document_id, "reason": "文档已向量化"})
                continue

            result = doc_processor.vectorize_document(document, db)

            if result:
                success_count += 1
                logger.info(f"成功向量化文档: {document.title}")
            else:
                failed_count += 1
                failed_details.append({"id": document_id, "reason": "向量化失败"})

        except Exception as e:
            failed_count += 1
            failed_details.append({"id": document_id, "reason": str(e)})
            logger.error(f"向量化文档 {document_id} 失败: {str(e)}")

    logger.info(f"用户 {current_user.username} 批量向量化: 成功 {success_count}, 失败 {failed_count}")

    return {
        "message": f"批量向量化完成",
        "success_count": success_count,
        "failed_count": failed_count,
        "failed_details": failed_details if failed_details else None
    }


@router.post("/batch-move")
async def batch_move_documents(
    data: dict,  # {"document_ids": [...], "target_kb_id": "..."}
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Move a batch of documents into another knowledge base.

    All moves are committed in a single transaction: per-document failures are
    collected and reported, and a commit failure rolls everything back.

    Args:
        data: Request body with "document_ids" (list of IDs) and "target_kb_id"
        current_user: Current user (only their own documents can be moved)
        db: Database session

    Returns:
        dict: Success/failure counts plus per-document failure reasons

    Raises:
        HTTPException: 400 on missing inputs, 404 if the target KB doesn't exist,
            500 if the final commit fails
    """
    document_ids = data.get("document_ids", [])
    target_kb_id = data.get("target_kb_id")
    
    if not document_ids:
        raise HTTPException(status_code=400, detail="请选择要移动的文档")
    
    if not target_kb_id:
        raise HTTPException(status_code=400, detail="请选择目标知识库")
    
    # Validate that the target knowledge base exists.
    # NOTE(review): ownership of the target KB is not checked — confirm whether
    # moving documents into another user's knowledge base is intended.
    from app.models.knowledge_base import KnowledgeBase
    target_kb = db.query(KnowledgeBase).filter(
        KnowledgeBase.id == target_kb_id
    ).first()
    
    if not target_kb:
        raise HTTPException(status_code=404, detail="目标知识库不存在")
    
    success_count = 0
    failed_count = 0
    failed_details = []
    
    for document_id in document_ids:
        try:
            # Fetch the document scoped to the current user (acts as the permission check)
            document = db.query(Document).filter(
                and_(
                    Document.id == document_id,
                    Document.upload_user_id == str(current_user.id)
                )
            ).first()
            
            if not document:
                failed_count += 1
                failed_details.append({"id": document_id, "reason": "文档不存在或无权限"})
                continue
            
            # Decrement the source knowledge base's document counter.
            # NOTE(review): when a document is already in the target KB, this query
            # presumably yields the same session object as target_kb (identity map),
            # so the -1 here and the +1 below net out — confirm against the session
            # configuration before relying on it.
            if document.knowledge_base_id:
                old_kb = db.query(KnowledgeBase).filter(
                    KnowledgeBase.id == document.knowledge_base_id
                ).first()
                if old_kb and old_kb.document_count > 0:
                    old_kb.document_count -= 1
            
            # Re-point the document at the target knowledge base
            document.knowledge_base_id = target_kb_id
            
            # Increment the target knowledge base's counter (None-safe)
            target_kb.document_count = (target_kb.document_count or 0) + 1
            
            success_count += 1
            logger.info(f"移动文档 {document.title} 到知识库 {target_kb.name}")
            
        except Exception as e:
            failed_count += 1
            failed_details.append({"id": document_id, "reason": str(e)})
            logger.error(f"移动文档 {document_id} 失败: {str(e)}")
    
    # Commit all moves atomically; roll everything back on failure
    try:
        db.commit()
    except Exception as e:
        db.rollback()
        raise HTTPException(status_code=500, detail=f"批量移动失败: {str(e)}")
    
    logger.info(f"用户 {current_user.username} 批量移动文档: 成功 {success_count}, 失败 {failed_count}")
    
    return {
        "message": f"批量移动完成",
        "success_count": success_count,
        "failed_count": failed_count,
        "failed_details": failed_details if failed_details else None
    }


@router.post("/batch-delete")
async def batch_delete_documents(
    document_ids: list[str],
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Delete a batch of the current user's documents.

    Database changes are committed first; on-disk files (originals and vector
    stores) are removed only after the commit succeeds, so a failed commit
    cannot leave DB rows pointing at already-deleted files.

    Args:
        document_ids: Document IDs to delete
        current_user: Current user
        db: Database session

    Returns:
        dict: Success/failure counts plus per-document failure reasons

    Raises:
        HTTPException: 400 on an empty batch, 500 if the commit fails
    """
    if not document_ids:
        raise HTTPException(status_code=400, detail="请选择要删除的文档")

    from app.models.knowledge_base import KnowledgeBase

    success_count = 0
    failed_count = 0
    failed_details = []
    pending_file_removals = []  # paths deleted only after a successful commit

    for document_id in document_ids:
        try:
            # Scoped to the uploader: users can only delete their own documents
            document = db.query(Document).filter(
                and_(
                    Document.id == document_id,
                    Document.upload_user_id == str(current_user.id)
                )
            ).first()

            if not document:
                failed_count += 1
                failed_details.append({"id": document_id, "reason": "文档不存在或无权限"})
                continue

            # Queue the vector-store file for post-commit removal
            if document.is_vectorized and document.vector_store_id:
                pending_file_removals.append(os.path.join(
                    settings.VECTOR_STORE_DIR,
                    f"{document.vector_store_id}.faiss"
                ))

            # Queue the original file for post-commit removal
            if document.file_path:
                pending_file_removals.append(document.file_path)

            # Keep the knowledge base's document counter in sync
            if document.knowledge_base_id:
                kb = db.query(KnowledgeBase).filter(
                    KnowledgeBase.id == document.knowledge_base_id
                ).first()
                if kb and kb.document_count > 0:
                    kb.document_count -= 1

            db.delete(document)
            success_count += 1

        except Exception as e:
            failed_count += 1
            failed_details.append({"id": document_id, "reason": str(e)})
            logger.error(f"删除文档 {document_id} 失败: {str(e)}")

    # Commit all deletions in one transaction
    try:
        db.commit()
    except Exception as e:
        db.rollback()
        raise HTTPException(status_code=500, detail=f"批量删除失败: {str(e)}")

    # Best-effort filesystem cleanup, only after the DB state is final
    for path in pending_file_removals:
        try:
            if os.path.exists(path):
                os.remove(path)
                logger.info(f"删除文件: {path}")
        except OSError as e:
            logger.warning(f"清理文件失败 {path}: {str(e)}")

    logger.info(f"用户 {current_user.username} 批量删除文档: 成功 {success_count}, 失败 {failed_count}")

    return {
        "message": f"批量删除完成",
        "success_count": success_count,
        "failed_count": failed_count,
        "failed_details": failed_details if failed_details else None
    }


@router.delete("/{document_id}")
async def delete_document(
    document_id: str,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Delete a single document owned by the current user.

    The database record is removed and committed first; on-disk files are
    cleaned up only afterwards, so a failed commit cannot leave a row pointing
    at an already-deleted file.

    Args:
        document_id: Document ID
        current_user: Current user
        db: Database session

    Returns:
        dict: Deletion result flags (whether the file / vectors were removed)

    Raises:
        HTTPException: 404 when the document isn't found, 500 on commit failure
    """
    document = db.query(Document).filter(
        and_(
            Document.id == document_id,
            Document.upload_user_id == str(current_user.id)
        )
    ).first()

    if not document:
        raise HTTPException(status_code=404, detail="文档不存在")

    # Capture everything we need up front: after db.delete + commit the ORM
    # instance is expired, and attribute access on a deleted instance raises
    # (the original read document.title / is_vectorized after the commit).
    doc_title = document.title
    was_vectorized = document.is_vectorized
    file_path = document.file_path
    vector_store_id = document.vector_store_id
    kb_id = document.knowledge_base_id

    try:
        # Keep the knowledge base's document counter in sync
        if kb_id:
            from app.models.knowledge_base import KnowledgeBase
            kb = db.query(KnowledgeBase).filter(
                KnowledgeBase.id == kb_id
            ).first()
            if kb and kb.document_count > 0:
                kb.document_count -= 1

        db.delete(document)
        db.commit()

    except Exception as e:
        logger.error(f"删除文档失败: {str(e)}")
        db.rollback()
        raise HTTPException(status_code=500, detail=f"删除失败: {str(e)}")

    # Best-effort filesystem cleanup after the DB state is final.
    # 1. Vector store file (if the document was vectorized)
    vectors_deleted = False
    if was_vectorized and vector_store_id:
        try:
            vector_path = os.path.join(
                settings.VECTOR_STORE_DIR,
                f"{vector_store_id}.faiss"
            )
            if os.path.exists(vector_path):
                os.remove(vector_path)
                vectors_deleted = True
                logger.info(f"成功删除文档 {document_id} 的向量文件: {vector_path}")
        except Exception as e:
            logger.warning(f"清理向量数据失败: {str(e)}")

    # 2. Original uploaded file
    file_deleted = False
    try:
        if file_path and os.path.exists(file_path):
            os.remove(file_path)
            file_deleted = True
            logger.info(f"已删除文件: {file_path}")
    except OSError as e:
        logger.warning(f"删除文件失败: {str(e)}")

    logger.info(f"用户 {current_user.username} 成功删除文档: {doc_title}")

    return {
        "message": "删除成功",
        "deleted_file": file_deleted,
        "deleted_vectors": vectors_deleted,
        "was_vectorized": was_vectorized
    }


@router.post("/{document_id}/vectorize")
async def vectorize_document(
    document_id: str,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Vectorize a document (re-vectorization is allowed).

    Args:
        document_id: Document ID
        current_user: Current user (must be the uploader)
        db: Database session

    Returns:
        dict: Vectorization result (chunk count and vector store ID)

    Raises:
        HTTPException: 404 if not found, 403 if not the uploader,
            400 if no KB association or no extractable text, 500 otherwise
    """
    document = db.query(Document).filter(Document.id == document_id).first()

    if not document:
        raise HTTPException(status_code=404, detail="文档不存在")

    # Ownership check — the other document endpoints (batch-vectorize,
    # auto-classify, delete) all restrict to the uploader; this one didn't,
    # letting any user overwrite another user's vector store.
    if document.upload_user_id != str(current_user.id):
        raise HTTPException(status_code=403, detail="无权操作此文档")

    # Re-vectorization is permitted; just log it
    if document.is_vectorized:
        logger.info(f"文档 {document.title} 已向量化，执行重新向量化")

    # A knowledge base association is required
    if not document.knowledge_base_id:
        raise HTTPException(status_code=400, detail="文档未关联知识库")

    try:
        from app.services.document_processor import document_processor
        from app.services.vector_store import VectorStore
        from app.services.llm_client import ollama_client

        # 1. Extract text and split into chunks
        logger.info(f"开始处理文档: {document.title}")
        result = document_processor.process_document(document.file_path, document.file_type)

        chunks = result["chunks"]
        if not chunks:
            raise HTTPException(status_code=400, detail="无法从文档中提取文本")

        logger.info(f"文档分块完成，共 {len(chunks)} 个块")

        # 2. Generate an embedding per chunk
        logger.info("开始生成向量...")
        embeddings = []
        for chunk in chunks:
            emb = await ollama_client.generate_embedding(chunk)
            embeddings.append(emb)

        logger.info(f"向量生成完成，共 {len(embeddings)} 个")

        # 3. Store in the vector library (the document ID doubles as the store ID)
        vector_store_id = document.id
        vector_path = os.path.join(settings.VECTOR_STORE_DIR, f"{vector_store_id}.faiss")

        # Drop the previous vector store if the document had a different one
        if document.vector_store_id and document.vector_store_id != vector_store_id:
            old_vector_path = os.path.join(settings.VECTOR_STORE_DIR, f"{document.vector_store_id}.faiss")
            if os.path.exists(old_vector_path):
                os.remove(old_vector_path)
                logger.info(f"删除旧向量库: {old_vector_path}")

        # Create a fresh per-document vector store
        from app.services.vector_store import get_embedding_dimension
        dimension = get_embedding_dimension(settings.OLLAMA_EMBED_MODEL)
        vector_store = VectorStore(dimension=dimension)
        logger.info(f"创建新向量库: {vector_path}")

        # Attach per-chunk metadata for retrieval
        metadatas = [{
            "document_id": document.id,
            "document_title": document.title,
            "chunk_index": i,
            "total_chunks": len(chunks)
        } for i in range(len(chunks))]

        vector_store.add_texts(chunks, embeddings, metadatas)

        # Persist the vector store to disk
        vector_store.save(vector_path)
        logger.info(f"向量库已保存: {vector_path}")

        # 4. Update the document's status fields
        document.is_vectorized = True
        document.vector_store_id = vector_store_id
        document.word_count = result["word_count"]
        document.summary = result["summary"]
        document.status = DocumentStatus.COMPLETED

        # 5. Recompute the knowledge base's document count
        from app.models.knowledge_base import KnowledgeBase
        kb = db.query(KnowledgeBase).filter(KnowledgeBase.id == document.knowledge_base_id).first()
        if kb:
            kb.document_count = db.query(Document).filter(
                Document.knowledge_base_id == document.knowledge_base_id
            ).count()

        db.commit()

        logger.info(f"文档 {document.title} 向量化成功")

        return {
            "message": "向量化成功",
            "document_id": document.id,
            "chunk_count": len(chunks),
            "vector_store_id": document.vector_store_id
        }

    except HTTPException:
        # Re-raise as-is: without this, the 400 "no extractable text" above
        # would be swallowed by the generic handler and returned as a 500.
        raise
    except Exception as e:
        logger.error(f"向量化失败: {str(e)}", exc_info=True)
        db.rollback()
        raise HTTPException(status_code=500, detail=f"向量化失败: {str(e)}")


@router.get("/{document_id}/download")
async def download_document(
    document_id: str,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Download a document owned by the current user.

    Args:
        document_id: Document ID
        current_user: Current user
        db: Database session

    Returns:
        FileResponse: The file, served with its stored MIME type so browsers
        can preview PDFs/images inline; unknown types force a download

    Raises:
        HTTPException: 404 when the document or its file is missing
    """
    # Scope the query to the uploader, consistent with the other document
    # endpoints — the original unscoped query let any authenticated user
    # download any other user's file.
    document = db.query(Document).filter(
        and_(
            Document.id == document_id,
            Document.upload_user_id == str(current_user.id)
        )
    ).first()

    if not document:
        raise HTTPException(status_code=404, detail="文档不存在")

    # The DB record may outlive the file on disk
    if not os.path.exists(document.file_path):
        raise HTTPException(status_code=404, detail="文件不存在")

    # Use the stored MIME type for in-browser preview; fall back to
    # application/octet-stream (forces download) when none was recorded.
    media_type = document.file_type or 'application/octet-stream'

    return FileResponse(
        path=document.file_path,
        filename=document.file_name,
        media_type=media_type
    )


@router.post("/{document_id}/auto-classify")
async def auto_classify_document(
    document_id: str,
    model_name: Optional[str] = None,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Run AI auto-classification for a single document.

    Args:
        document_id: Document ID
        model_name: Optional model override
        current_user: Current user (must be the uploader)
        db: Database session

    Returns:
        dict: The suggested category (ID and name), confidence, and reasoning

    Raises:
        HTTPException: 404 if not found, 403 if not the uploader
    """
    # Look up the document and verify ownership before doing any work
    document = db.query(Document).filter(Document.id == document_id).first()
    if document is None:
        raise HTTPException(status_code=404, detail="文档不存在")
    if str(current_user.id) != document.upload_user_id:
        raise HTTPException(status_code=403, detail="无权操作此文档")

    # User preferences would have to arrive via the request body; none for now
    classifier = AIClassifier()
    suggested_id, confidence, reasoning = await classifier.classify_document(
        document=document,
        db=db,
        model_name=model_name,
        user_preferences=None
    )

    # Resolve the suggested category's display name, if a suggestion was made
    suggested_name = None
    if suggested_id:
        from app.models.category import Category
        match = db.query(Category).filter(Category.id == suggested_id).first()
        if match is not None:
            suggested_name = match.name

    return {
        "document_id": document_id,
        "document_title": document.title,
        "suggested_category_id": suggested_id,
        "suggested_category_name": suggested_name,
        "confidence": confidence,
        "reasoning": reasoning,
        "success": suggested_id is not None
    }


@router.post("/{document_id}/apply-classification")
async def apply_classification(
    document_id: str,
    category_id: str,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Apply an AI-suggested category to a document.

    Args:
        document_id: Document ID
        category_id: Category ID to assign
        current_user: Current user (must be the uploader)
        db: Database session

    Returns:
        dict: Confirmation with the applied category's ID and name

    Raises:
        HTTPException: 404 if the document or category is missing,
            403 if the caller isn't the uploader
    """
    from app.models.category import Category

    # The document must exist and belong to the caller
    document = db.query(Document).filter(Document.id == document_id).first()
    if document is None:
        raise HTTPException(status_code=404, detail="文档不存在")
    if str(current_user.id) != document.upload_user_id:
        raise HTTPException(status_code=403, detail="无权操作此文档")

    # The target category must exist
    category = db.query(Category).filter(Category.id == category_id).first()
    if category is None:
        raise HTTPException(status_code=404, detail="分类不存在")

    # Assign and persist
    document.category_id = category_id
    document.updated_at = datetime.utcnow()
    db.commit()

    logger.info(
        f"用户 {current_user.username} 应用AI分类: "
        f"文档 {document.title} -> {category.name}"
    )

    return {
        "message": "分类应用成功",
        "document_id": document_id,
        "category_id": category_id,
        "category_name": category.name
    }


@router.post("/batch-classify")
async def batch_classify_documents(
    document_ids: List[str],
    model_name: Optional[str] = None,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """
    Run AI auto-classification on a batch of documents.

    Args:
        document_ids: Document IDs to classify
        model_name: Optional model override
        current_user: Current user (only their own documents are classified)
        db: Database session

    Returns:
        dict: Total count and per-document classification results

    Raises:
        HTTPException: 404 when none of the IDs belong to the caller
    """
    # Scope the lookup to the caller's own documents
    documents = db.query(Document).filter(
        and_(
            Document.id.in_(document_ids),
            Document.upload_user_id == str(current_user.id)
        )
    ).all()

    if not documents:
        raise HTTPException(status_code=404, detail="未找到可操作的文档")

    # Run the batch classification
    classifier = AIClassifier()
    results = await classifier.batch_classify_documents(
        documents=documents,
        db=db,
        model_name=model_name,
        user_preferences=None
    )

    # Attach category display names. Import once and cache lookups instead of
    # re-importing and re-querying inside the loop for repeated category IDs.
    from app.models.category import Category
    name_cache = {}
    for result in results:
        cat_id = result['category_id']
        if not cat_id:
            continue
        if cat_id not in name_cache:
            category = db.query(Category).filter(
                Category.id == cat_id
            ).first()
            name_cache[cat_id] = category.name if category else None
        if name_cache[cat_id] is not None:
            result['category_name'] = name_cache[cat_id]

    logger.info(
        f"用户 {current_user.username} 批量分类了 {len(documents)} 个文档"
    )

    return {
        "total": len(documents),
        "results": results
    }
