"""
聊天API - 完整版
"""
from fastapi import APIRouter, Depends, HTTPException
from fastapi.responses import StreamingResponse
from sqlalchemy.orm import Session
from sqlalchemy import func
from typing import Optional, List
from app.api.deps import get_db, get_current_user
from app.models.user import User
from app.models.chat_history import ChatSession, ChatMessage as ChatMessageModel
from app.schemas.chat import (
    ChatRequest,
    ChatResponse,
    ChatSessionCreate,
    ChatSessionResponse,
    ChatMessageResponse
)
from app.services.qa_engine import create_rag_engine
from app.services.llm_client import ollama_client
import logging
import json
import uuid
from datetime import datetime

logger = logging.getLogger(__name__)

router = APIRouter(prefix="/chat", tags=["chat"])


# ==================== 会话管理 ====================

@router.get("/sessions", response_model=List[ChatSessionResponse])
async def list_sessions(
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """List all chat sessions owned by the current user.

    Sessions are ordered by most recently updated. Each entry carries its
    message count, computed in a single aggregated query (outer join +
    GROUP BY) instead of one COUNT query per session (N+1 pattern).

    Raises:
        HTTPException 500: on any query failure.
    """
    try:
        # LEFT OUTER JOIN so sessions with zero messages are still returned
        # (their COUNT is 0 because COUNT ignores NULL ids from the join).
        rows = (
            db.query(ChatSession, func.count(ChatMessageModel.id))
            .outerjoin(ChatMessageModel, ChatMessageModel.session_id == ChatSession.id)
            .filter(ChatSession.user_id == str(current_user.id))
            .group_by(ChatSession.id)
            .order_by(ChatSession.updated_at.desc())
            .all()
        )

        return [
            {
                "id": session.id,
                "title": session.title,
                "knowledge_base_id": session.knowledge_base_id,
                "created_at": session.created_at,
                "message_count": msg_count,
            }
            for session, msg_count in rows
        ]
    except Exception as e:
        logger.error(f"获取会话列表失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取会话列表失败: {str(e)}")


@router.post("/sessions", response_model=ChatSessionResponse)
async def create_session(
    session: ChatSessionCreate,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """Create a new chat session owned by the current user.

    Returns the persisted session together with a message count of 0
    (a fresh session has no messages yet).
    """
    try:
        record = ChatSession(
            id=str(uuid.uuid4()),
            title=session.title,
            user_id=str(current_user.id),
            knowledge_base_id=session.knowledge_base_id,
        )
        db.add(record)
        db.commit()
        # Refresh to pick up DB-generated fields such as created_at.
        db.refresh(record)

        return {
            "id": record.id,
            "title": record.title,
            "knowledge_base_id": record.knowledge_base_id,
            "created_at": record.created_at,
            "message_count": 0,
        }
    except Exception as e:
        logger.error(f"创建会话失败: {str(e)}")
        db.rollback()
        raise HTTPException(status_code=500, detail=f"创建会话失败: {str(e)}")


@router.delete("/sessions/{session_id}")
async def delete_session(
    session_id: str,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """Delete a chat session owned by the current user.

    Responds 404 when the session does not exist or belongs to a
    different user; 500 when the delete itself fails.
    """
    # Ownership check: only sessions belonging to the caller are visible.
    owned_session = db.query(ChatSession).filter(
        ChatSession.id == session_id,
        ChatSession.user_id == str(current_user.id)
    ).first()

    if owned_session is None:
        raise HTTPException(status_code=404, detail="会话不存在")

    try:
        db.delete(owned_session)
        db.commit()
    except Exception as e:
        logger.error(f"删除会话失败: {str(e)}")
        db.rollback()
        raise HTTPException(status_code=500, detail=f"删除失败: {str(e)}")

    return {"message": "删除成功"}


@router.get("/sessions/{session_id}/messages", response_model=List[ChatMessageResponse])
async def get_session_messages(
    session_id: str,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """Return all messages of a session, oldest first.

    The persisted `sources` column holds a JSON string; it is decoded
    per-message and falls back to an empty list when a row is corrupted,
    so one bad row cannot fail the whole request.

    Raises:
        HTTPException 404: session missing or not owned by the caller.
        HTTPException 500: on any other query failure.
    """
    # Ownership check: the session must belong to the current user.
    session = db.query(ChatSession).filter(
        ChatSession.id == session_id,
        ChatSession.user_id == str(current_user.id)
    ).first()

    if not session:
        raise HTTPException(status_code=404, detail="会话不存在")

    try:
        messages = db.query(ChatMessageModel).filter(
            ChatMessageModel.session_id == session_id
        ).order_by(ChatMessageModel.created_at).all()

        result = []
        for msg in messages:
            # Decode stored sources defensively: a single corrupted JSON
            # value should degrade to [] instead of a 500 for the request.
            try:
                sources = json.loads(msg.sources) if msg.sources else []
            except (json.JSONDecodeError, TypeError):
                logger.warning(f"消息 {msg.id} 的sources字段JSON解析失败")
                sources = []

            result.append({
                "id": msg.id,
                "session_id": msg.session_id,
                "role": msg.role,
                "content": msg.content,
                "sources": sources,
                "created_at": msg.created_at
            })

        return result
    except Exception as e:
        logger.error(f"获取消息失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"获取消息失败: {str(e)}")


# ==================== 聊天接口 ====================

@router.post("/", response_model=ChatResponse)
async def chat(
    request: ChatRequest,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """Non-streaming chat endpoint.

    Creates a session on demand (or validates the supplied one), stores
    the user message, optionally retrieves context passages from the
    referenced knowledge base, generates an answer via the RAG engine,
    and persists the assistant reply.

    Raises:
        HTTPException 404: the supplied session does not exist or is not
            owned by the caller.
        HTTPException 500: on any other failure.
    """
    try:
        # Create a new session when none was supplied; otherwise verify
        # that the referenced session belongs to the current user — the
        # same ownership check the session-management endpoints perform.
        if not request.session_id:
            new_session = ChatSession(
                id=str(uuid.uuid4()),
                title=request.message[:50],
                user_id=str(current_user.id),
                knowledge_base_id=request.knowledge_base_id
            )
            db.add(new_session)
            db.commit()
            session_id = new_session.id
        else:
            session = db.query(ChatSession).filter(
                ChatSession.id == request.session_id,
                ChatSession.user_id == str(current_user.id)
            ).first()
            if not session:
                raise HTTPException(status_code=404, detail="会话不存在")
            session_id = request.session_id

        # Stage the user message; it is committed together with the
        # assistant reply below so a failed generation leaves no orphan.
        user_msg = ChatMessageModel(
            id=str(uuid.uuid4()),
            session_id=session_id,
            role="user",
            content=request.message
        )
        db.add(user_msg)

        # Optionally load the knowledge-base vector store and retrieve
        # context passages for the question. Retrieval is best-effort:
        # any failure here falls back to plain LLM mode.
        vector_store = None
        context_docs = []
        if request.knowledge_base_id:
            try:
                from app.services.vector_store import VectorStore
                from app.core.config import settings
                import os

                # Path of the persisted index for this knowledge base.
                vector_path = os.path.join(settings.UPLOAD_DIR, "vectors", request.knowledge_base_id)

                if os.path.exists(f"{vector_path}.index"):
                    vector_store = VectorStore()
                    vector_store.load(vector_path)
                    logger.info(f"成功加载知识库向量: {request.knowledge_base_id}")

                    # Embed the question and fetch the top-5 passages.
                    question_embedding = await ollama_client.generate_embedding(request.message)
                    results = vector_store.similarity_search(question_embedding, k=5)
                    context_docs = [text for text, score, metadata in results]
                    logger.info(f"检索到 {len(context_docs)} 个相关文档片段")
                else:
                    logger.warning(f"知识库向量不存在: {vector_path}")
            except Exception as e:
                logger.error(f"加载向量存储失败: {str(e)}")

        # Answer via the RAG engine (plain LLM when no context was found).
        rag_engine = create_rag_engine(vector_store=vector_store)
        answer = await rag_engine.answer_question(
            question=request.message,
            context_docs=context_docs if context_docs else None,
            temperature=request.temperature,
            max_tokens=request.max_tokens
        )

        # Persist the assistant reply; commits both staged messages.
        assistant_msg = ChatMessageModel(
            id=str(uuid.uuid4()),
            session_id=session_id,
            role="assistant",
            content=answer
        )
        db.add(assistant_msg)
        db.commit()

        return ChatResponse(
            message=answer,
            session_id=session_id,
            sources=[]
        )
    except HTTPException:
        # Preserve deliberate HTTP errors (e.g. the 404 above) instead of
        # masking them as generic 500s in the handler below.
        raise
    except Exception as e:
        logger.error(f"聊天失败: {str(e)}")
        db.rollback()
        raise HTTPException(status_code=500, detail=f"聊天失败: {str(e)}")


@router.post("/stream")
async def chat_stream(
    request: ChatRequest,
    current_user: User = Depends(get_current_user),
    db: Session = Depends(get_db)
):
    """Streaming chat endpoint (Server-Sent Events).

    Persists the user message up front, then streams the assistant's
    answer chunk by chunk as SSE ``data:`` events. Event types emitted:
    ``session_id``, ``content``, ``sources``, ``done``, and ``error``.
    """
    
    # Create a new session when the client did not supply a session_id.
    if not request.session_id:
        new_session = ChatSession(
            id=str(uuid.uuid4()),
            title=request.message[:50],
            user_id=str(current_user.id),
            knowledge_base_id=request.knowledge_base_id
        )
        db.add(new_session)
        db.commit()
        session_id = new_session.id
    else:
        session_id = request.session_id
    
    # Persist the user's message before streaming begins.
    user_msg = ChatMessageModel(
        id=str(uuid.uuid4()),
        session_id=session_id,
        role="user",
        content=request.message
    )
    db.add(user_msg)
    db.commit()
    
    async def generate():
        """Async generator yielding SSE events; saves the full reply at the end."""
        full_answer = ""
        assistant_msg_id = str(uuid.uuid4())
        
        try:
            # Tell the client which session this stream belongs to.
            yield f"data: {json.dumps({'type': 'session_id', 'session_id': session_id})}\n\n"
            
            # knowledge_base_id selects which vector store(s) to search:
            #   None          -> no knowledge base (plain LLM)
            #   "all"         -> search every knowledge base
            #   "id1,id2,id3" -> search several specific knowledge bases
            #   anything else -> search a single knowledge base
            vector_store = None
            kb_ids = []
            
            print(f"\n========== 聊天请求 ==========")
            print(f"问题: {request.message}")
            print(f"knowledge_base_id: {request.knowledge_base_id}")
            print(f"document_ids: {request.document_ids}")
            logger.info(f"========== 聊天请求 ========== 问题: {request.message}, KB_ID: {request.knowledge_base_id}, DOC_IDS: {request.document_ids}")
            
            # When explicit document IDs are given, restrict retrieval to them.
            doc_ids = []
            if request.document_ids:
                doc_ids = [doc_id.strip() for doc_id in request.document_ids.split(',') if doc_id.strip()]
                print(f"使用指定文档: {doc_ids}")
                logger.info(f"使用指定文档: {doc_ids}")
                
                # Keep only documents that have actually been vectorized.
                from app.models.document import Document
                docs = db.query(Document).filter(Document.id.in_(doc_ids), Document.is_vectorized == True).all()
                print(f"查询到 {len(docs)} 个已向量化的文档")
                
                if docs:
                    # Load the vectors of these documents directly
                    # (new layout: vectors are stored per document ID).
                    from app.services.vector_store import load_documents_vector_stores
                    print(f"正在加载 {len(doc_ids)} 个文档的向量...")
                    logger.info(f"正在加载 {len(doc_ids)} 个文档的向量")
                    vector_store = load_documents_vector_stores(doc_ids)
                    if vector_store:
                        print(f"✅ 向量库加载成功，共 {vector_store.count()} 个向量")
                        logger.info(f"向量库加载成功，共 {vector_store.count()} 个向量")
                    else:
                        print("❌ 向量库加载失败或为空")
                        logger.warning(f"向量库加载失败或为空")
                else:
                    print("❌ 未找到指定的已向量化文档")
                    logger.warning("未找到指定的已向量化文档")
            elif request.knowledge_base_id == "all":
                # Gather every vectorized document across all knowledge bases.
                from app.models.document import Document
                docs = db.query(Document).filter(Document.is_vectorized == True).all()
                doc_ids = [doc.id for doc in docs]
                print(f"使用所有知识库的文档: {len(doc_ids)} 个")
                
                if doc_ids:
                    from app.services.vector_store import load_documents_vector_stores
                    vector_store = load_documents_vector_stores(doc_ids)
                    if vector_store:
                        print(f"向量库加载成功，共 {vector_store.count()} 个向量")
                        logger.info(f"向量库加载成功，共 {vector_store.count()} 个向量")
            elif request.knowledge_base_id and ',' in request.knowledge_base_id:
                # Several knowledge bases: use every vectorized document they contain.
                kb_ids = [kb_id.strip() for kb_id in request.knowledge_base_id.split(',') if kb_id.strip()]
                print(f"使用多个知识库: {kb_ids}")
                
                from app.models.document import Document
                docs = db.query(Document).filter(
                    Document.knowledge_base_id.in_(kb_ids),
                    Document.is_vectorized == True
                ).all()
                doc_ids = [doc.id for doc in docs]
                print(f"从 {len(kb_ids)} 个知识库中找到 {len(doc_ids)} 个已向量化文档")
                
                if doc_ids:
                    from app.services.vector_store import load_documents_vector_stores
                    vector_store = load_documents_vector_stores(doc_ids)
                    if vector_store:
                        print(f"向量库加载成功，共 {vector_store.count()} 个向量")
                        logger.info(f"向量库加载成功，共 {vector_store.count()} 个向量")
            elif request.knowledge_base_id:
                # Single knowledge base: use all of its vectorized documents.
                from app.models.document import Document
                docs = db.query(Document).filter(
                    Document.knowledge_base_id == request.knowledge_base_id,
                    Document.is_vectorized == True
                ).all()
                doc_ids = [doc.id for doc in docs]
                print(f"使用单个知识库: {request.knowledge_base_id}，找到 {len(doc_ids)} 个已向量化文档")
                
                if doc_ids:
                    from app.services.vector_store import load_documents_vector_stores
                    vector_store = load_documents_vector_stores(doc_ids)
                    if vector_store:
                        print(f"向量库加载成功，共 {vector_store.count()} 个向量")
                        logger.info(f"向量库加载成功，共 {vector_store.count()} 个向量")
            else:
                print("未指定知识库，使用纯LLM模式")
                logger.info("未指定知识库，使用纯LLM模式")
            
            # Build the RAG engine (falls back to plain LLM without a store).
            rag_engine = create_rag_engine(vector_store=vector_store)
            print(f"RAG引擎已创建，vector_store={'有' if vector_store else '无'}")
            logger.info(f"RAG引擎已创建，vector_store={'有' if vector_store else '无'}")
            
            # Streamed answering uses retrieve_and_answer (which retrieves
            # internally); retrieval is also done here first so `sources`
            # can be reported to the client.
            sources = []
            if vector_store and vector_store.count() > 0:
                try:
                    print(f"开始检索文档...")
                    # Embed the question.
                    question_embedding = await ollama_client.generate_embedding(request.message)
                    print(f"✅ 问题向量生成成功，维度: {len(question_embedding)}")
                    
                    # Pick the retrieval depth dynamically: when specific
                    # documents were selected, fetch more chunks so every
                    # document is covered.
                    k = len(doc_ids) * 3 if doc_ids else 20  # 3 chunks per selected document, 20 otherwise
                    k = max(k, 20)  # retrieve at least 20
                    k = min(k, 50)  # cap at 50 to avoid overload
                    print(f"检索参数 k={k}, doc_ids={len(doc_ids) if doc_ids else 0}")
                    
                    # Run the similarity search.
                    results = vector_store.similarity_search(question_embedding, k=k)
                    print(f"✅ 检索完成，获得 {len(results)} 个相关片段")
                    logger.info(f"检索到 {len(results)} 个相关片段（k={k}）")
                    
                    # When explicit document IDs were requested, drop hits
                    # coming from any other document.
                    if doc_ids:
                        results = [(text, score, metadata) for text, score, metadata in results 
                                   if metadata.get("document_id") in doc_ids]
                        logger.info(f"按文档ID过滤后剩余 {len(results)} 个结果")
                    
                    # Build the sources list: group by document, de-duplicate.
                    seen_docs = {}  # {document_id: [chunks]}
                    
                    # Similarity threshold (cosine similarity, 0-1, higher is closer).
                    # Lowered after production feedback to avoid "answer is
                    # correct but no references shown":
                    #   - user selected specific documents: 0.40 (lenient, trust the selection)
                    #   - automatic retrieval:               0.50 (moderate, keep quality)
                    if doc_ids:
                        similarity_threshold = 0.40  # relaxed threshold
                        print(f"\n📌 用户选择了 {len(doc_ids)} 个文档，使用阈值 {similarity_threshold}")
                    else:
                        similarity_threshold = 0.50  # moderate threshold
                        print(f"\n🔍 自动检索模式，使用阈值 {similarity_threshold}")
                    
                    print(f"\n🔍 检索结果详情（前10个）:")
                    for idx, (text, score, metadata) in enumerate(results[:10]):
                        doc_id = metadata.get("document_id")
                        doc_title = metadata.get("document_title", "未知")
                        print(f"  [{idx+1}] 文档: {doc_title[:30]}... | 余弦相似度: {score:.4f} ({score*100:.1f}%) | 片段: {text[:50]}...")
                        
                        # Keep only hits above the similarity threshold.
                        if score >= similarity_threshold and doc_id:
                            if doc_id not in seen_docs:
                                seen_docs[doc_id] = []
                            # At most 2 of the best chunks per document.
                            if len(seen_docs[doc_id]) < 2:
                                seen_docs[doc_id].append({
                                    "document_id": doc_id,
                                    "document_title": metadata.get("document_title"),
                                    "chunk_index": metadata.get("chunk_index"),
                                    "similarity": float(score),  # cosine similarity in [0, 1]
                                    "excerpt": text[:200]
                                })
                    
                    # Flatten the per-document groups; each document
                    # contributes 1-2 chunks.
                    for doc_id, chunks in seen_docs.items():
                        sources.extend(chunks)
                    
                    # With explicit documents, show every matching document;
                    # otherwise show only the first 5.
                    if not doc_ids:
                        sources = sources[:5]

                    # Fallback when the threshold filtered everything out:
                    # show the top-3 highest-scoring chunks as references.
                    if not sources and results:
                        print("⚠️ 未达到阈值，使用Top-3回退策略展示参考片段")
                        top_fallback = results[:3]
                        for text, score, metadata in top_fallback:
                            doc_id_fb = metadata.get("document_id")
                            if doc_id_fb:
                                sources.append({
                                    "document_id": doc_id_fb,
                                    "document_title": metadata.get("document_title"),
                                    "chunk_index": metadata.get("chunk_index"),
                                    "similarity": float(score),
                                    "excerpt": text[:200]
                                })
                    
                    logger.info(f"已提取 {len(sources)} 个引用来源（相似度>{similarity_threshold}），来自 {len(seen_docs)} 个文档")
                    print(f"✅ 成功提取 {len(sources)} 个引用来源（相似度>{similarity_threshold}），来自 {len(seen_docs)} 个文档")
                except Exception as e:
                    # Source extraction is best-effort; streaming continues without it.
                    print(f"❌ 检索失败: {str(e)}")
                    logger.error(f"提取sources失败: {str(e)}", exc_info=True)
                    import traceback
                    traceback.print_exc()
            
            stream_generator = await rag_engine.retrieve_and_answer(
                question=request.message,
                temperature=request.temperature,
                max_tokens=request.max_tokens,
                stream=True
            )
            
            async for chunk in stream_generator:
                full_answer += chunk
                # Forward each chunk as an SSE-formatted event.
                yield f"data: {json.dumps({'type': 'content', 'content': chunk})}\n\n"
            
            # Emit the collected source references.
            if sources:
                yield f"data: {json.dumps({'type': 'sources', 'sources': sources})}\n\n"
            
            # After the stream ends, save the complete reply in one row.
            if full_answer:
                assistant_msg = ChatMessageModel(
                    id=assistant_msg_id,
                    session_id=session_id,
                    role="assistant",
                    content=full_answer,
                    sources=json.dumps(sources, ensure_ascii=False) if sources else None
                )
                db.add(assistant_msg)
                db.commit()
                logger.info(f"已保存完整回复，消息ID: {assistant_msg_id}, 长度: {len(full_answer)}, sources: {len(sources) if sources else 0}")
            
            # Signal normal completion.
            yield f"data: {json.dumps({'type': 'done'})}\n\n"
        except Exception as e:
            logger.error(f"流式聊天失败: {str(e)}", exc_info=True)
            db.rollback()
            yield f"data: {json.dumps({'type': 'error', 'message': str(e)})}\n\n"
    
    return StreamingResponse(
        generate(),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
        }
    )


@router.get("/health")
async def check_llm_health(current_user: User = Depends(get_current_user)):
    """Report the health of the backing Ollama LLM service.

    Returns the service status plus the configured base URL and the
    names of the LLM and embedding models.
    """
    try:
        is_up = await ollama_client.check_health()
        return {
            "status": "healthy" if is_up else "unhealthy",
            "ollama_base_url": ollama_client.base_url,
            "llm_model": ollama_client.llm_model,
            "embed_model": ollama_client.embed_model,
        }
    except Exception as e:
        logger.error(f"健康检查失败: {str(e)}")
        raise HTTPException(status_code=500, detail=f"健康检查失败: {str(e)}")
