# app/api/rag/routes.py

import os
import json
import time
import requests
import numpy as np
from typing import List, Dict, Any
from fastapi import APIRouter, Request, HTTPException
from sse_starlette.sse import EventSourceResponse
from pymilvus import connections, Collection, utility

from config.settings import settings
from config.model_config import ModelConfig
from app.api.rag.schemas import (
    QuestionRequest, 
    RAGResponse, 
    SourceInfo, 
    ImageSourceInfo,
    RAGStats,
    IntentInfo,
    UserLevelInfo
)
from app.api.rag.utils import (
    connect_milvus,
    search_text_vectors,
    search_image_vectors,
    call_llm_api,
    call_llm_stream,
    build_prompt_with_sources,
    build_prompt_with_sources_and_history,
    call_aliyun_embedding_api,
    _calculate_text_similarity
)
from app.services.intent_recognition import IntentRecognizer

# Optional database integration: if the module is missing, QA pairs are
# simply not persisted and the rest of the API keeps working.
try:
    from app.core.sql_database import add_message_to_db, init_database, check_database_status, get_all_messages, get_conversation_history
    # Initialize the database schema at import time (side effect).
    init_database()
    DB_AVAILABLE = True
except ImportError as e:
    DB_AVAILABLE = False
    import logging
    logging.warning(f"数据库模块不可用，QA内容将不会保存到数据库: {e}")

router = APIRouter()

# Global model instances removed: models are now invoked via API calls.

@router.post("/ask", response_model=RAGResponse, summary="多模态RAG问答")
async def ask_question(request: QuestionRequest):
    """Multimodal RAG question-answering endpoint.

    Pipeline: embed the question, run text (and optionally image) vector
    retrieval against Milvus, filter low-similarity hits, load recent
    conversation history, recognize intent and user level, build an
    adaptive prompt, call the LLM, persist the QA pair with its retrieval
    evidence (best effort), and return the answer plus sources.

    Raises:
        HTTPException: 500 with the underlying error message on any failure.
    """
    from app.core.logging import get_logger
    logger = get_logger(__name__)
    
    start_time = time.time()
    logger.info(f"开始处理RAG问答请求: {request.question}")
    logger.info(f"请求参数: top_k={request.top_k}, include_images={request.include_images}")
    
    try:
        # Connect to Milvus.
        logger.info("连接Milvus数据库...")
        connect_milvus()
        logger.info("Milvus连接成功")
        
        # 1. Embed the user question into a text query vector.
        logger.info("开始向量化用户问题...")
        query_text_vector = call_aliyun_embedding_api(request.question)
        logger.info(f"问题向量化完成，向量维度: {query_text_vector.shape}")
        
        # 2. Text vector retrieval.
        logger.info(f"开始文本向量检索，top_k={request.top_k}, knowledge_base={request.knowledge_base}...")
        text_chunks, text_scores, text_sources, text_file_types, text_position_info = search_text_vectors(
            query_text_vector.tolist(), request.top_k, request.knowledge_base
        )
        logger.info(f"文本检索完成，找到 {len(text_chunks)} 个相关文本块")
        
        # Drop text chunks below the similarity threshold.
        filtered_text_chunks = []
        filtered_text_scores = []
        filtered_text_sources = []
        filtered_text_file_types = []
        filtered_text_position_info = []
        
        for i, (chunk, score, source, file_type, pos_info) in enumerate(zip(text_chunks, text_scores, text_sources, text_file_types, text_position_info)):
            if score >= 0.6:  # similarity threshold
                filtered_text_chunks.append(chunk)
                filtered_text_scores.append(score)
                filtered_text_sources.append(source)
                filtered_text_file_types.append(file_type)
                filtered_text_position_info.append(pos_info)
                logger.info(f"保留文本块 {i+1}: 相似度={score:.4f} >= 0.6")
            else:
                logger.info(f"过滤文本块 {i+1}: 相似度={score:.4f} < 0.6")
        
        # Replace results with the filtered versions.
        text_chunks = filtered_text_chunks
        text_scores = filtered_text_scores
        text_sources = filtered_text_sources
        text_file_types = filtered_text_file_types
        text_position_info = filtered_text_position_info
        
        logger.info(f"相似度过滤后，保留 {len(text_chunks)} 个高质量文本块")
        
        # Log retrieval result details.
        for i, (chunk, score, source, file_type) in enumerate(zip(text_chunks, text_scores, text_sources, text_file_types)):
            logger.info(f"文本块 {i+1}: 相似度={score:.4f}, 来源={source}, 类型={file_type}")
            logger.debug(f"文本块 {i+1} 内容: {chunk[:100]}...")
        
        # 3. Image vector retrieval.
        if request.include_images:
            logger.info(f"开始图片向量检索，top_k={request.top_k}, knowledge_base={request.knowledge_base}...")
            
            try:
                # Approach 1: use the dedicated image-query vector generator.
                from app.api.rag.utils import generate_image_query_vector
                
                logger.info("生成专门的图片查询向量...")
                query_image_vector = generate_image_query_vector(request.question)
                
                # Search images with the dedicated image query vector.
                image_chunks, image_scores, image_sources, image_file_types, image_timestamp_info = search_image_vectors(
                    query_image_vector.tolist(), request.top_k, request.knowledge_base
                )
                logger.info(f"图片检索完成，找到 {len(image_chunks)} 个相关图片")
                
                # If all similarities are low, approach 2: retry with a larger candidate pool.
                if image_scores and max(image_scores) < 0.3:
                    logger.info("图片相似度较低，尝试使用更宽松的阈值...")
                    # Re-run the search with twice the candidate count.
                    image_chunks, image_scores, image_sources, image_file_types, image_timestamp_info = search_image_vectors(
                        query_image_vector.tolist(), request.top_k * 2, request.knowledge_base  # widen the candidate pool
                    )
                    logger.info(f"宽松搜索完成，找到 {len(image_chunks)} 个相关图片")
                        
            except Exception as e:
                logger.warning(f"图片向量检索失败，尝试备用方案: {e}")
                # Fallback: search the image collection with the plain text query vector.
                try:
                    logger.info("尝试使用文本查询向量进行图片搜索...")
                    image_chunks, image_scores, image_sources, image_file_types, image_timestamp_info = search_image_vectors(
                        query_text_vector.tolist(), request.top_k, request.knowledge_base
                    )
                    logger.info(f"备用方案成功，找到 {len(image_chunks)} 个图片结果")
                except Exception as e2:
                    logger.warning(f"备用方案也失败，跳过图片检索: {e2}")
                    image_chunks = []
                    image_scores = []
                    image_sources = []
                    image_file_types = []
                    image_timestamp_info = []
            
            # Filter low-similarity image chunks (threshold relaxed when scores are low).
            filtered_image_chunks = []
            filtered_image_scores = []
            filtered_image_sources = []
            filtered_image_file_types = []
            filtered_image_timestamp_info = []
            
            # Image similarity threshold: relax it when the best score is low.
            image_threshold = 0.6
            if image_scores and max(image_scores) < 0.3:
                image_threshold = 0.1  # relaxed threshold
                logger.info(f"图片相似度较低，使用宽松阈值: {image_threshold}")
            # If every similarity is negative, relax even further.
            if image_scores and max(image_scores) < 0:
                image_threshold = -0.1  # allow negative similarity
                logger.info(f"图片相似度为负数，使用负阈值: {image_threshold}")
            
            for i, (chunk, score, source, file_type, ts_info) in enumerate(zip(image_chunks, image_scores, image_sources, image_file_types, image_timestamp_info)):
                if score >= image_threshold:
                    filtered_image_chunks.append(chunk)
                    filtered_image_scores.append(score)
                    filtered_image_sources.append(source)
                    filtered_image_file_types.append(file_type)
                    filtered_image_timestamp_info.append(ts_info)
                    logger.info(f"保留图片块 {i+1}: 相似度={score:.4f} >= {image_threshold}")
                else:
                    logger.info(f"过滤图片块 {i+1}: 相似度={score:.4f} < {image_threshold}")
            
            # Replace results with the filtered versions.
            image_chunks = filtered_image_chunks
            image_scores = filtered_image_scores
            image_sources = filtered_image_sources
            image_file_types = filtered_image_file_types
            image_timestamp_info = filtered_image_timestamp_info
            
            logger.info(f"相似度过滤后，保留 {len(image_chunks)} 个高质量图片块")
            image_paths = image_chunks  # the image "chunks" are the image paths themselves
        else:
            image_sources = []
            image_paths = []
            image_scores = []
            image_file_types = []
            image_timestamp_info = []
            logger.info("图片向量检索已禁用")
        
        # 4. Load conversation history for multi-turn dialogue.
        conversation_history = []
        if DB_AVAILABLE and request.conversation_id:
            conversation_history = get_conversation_history(request.conversation_id, limit=5)
            logger.info(f"获取到 {len(conversation_history)} 条对话历史")
            if conversation_history:
                logger.info("对话历史:")
                for i, msg in enumerate(conversation_history[-4:], 1):  # log only the last 4
                    role = "用户" if msg["role"] == "user" else "助手"
                    content = msg["content"][:50] + "..." if len(msg["content"]) > 50 else msg["content"]
                    logger.info(f"  {i}. {role}: {content}")
        
        # 5. Intent recognition and user-level estimation.
        logger.info("开始意图识别和用户水平判断...")
        intent_info = IntentRecognizer.recognize_intent(request.question)
        user_level_info = IntentRecognizer.recognize_user_level(request.question, conversation_history)
        
        logger.info(f"识别结果 - 意图: {intent_info['intent_type']}, 用户水平: {user_level_info['user_level']}")
        logger.info(f"意图描述: {intent_info['description']}")
        logger.info(f"用户水平描述: {user_level_info['description']}")
        
        # 6. Build the adaptive prompt (including image info).
        logger.info("开始构建自适应prompt...")
        
        # Check whether anything was retrieved at all.
        has_retrieved_content = len(text_chunks) > 0 or len(image_paths) > 0
        
        if has_retrieved_content:
            # Build a prompt containing both text chunks and images.
            prompt = IntentRecognizer.build_adaptive_prompt(
                request.question, 
                text_chunks, 
                intent_info, 
                user_level_info,
                image_paths=image_paths  # include the image paths
            )
            logger.info(f"自适应Prompt构建完成，长度: {len(prompt)} 字符")
            logger.info(f"包含 {len(text_chunks)} 个文本块和 {len(image_paths)} 个图片")
        else:
            # With nothing retrieved, fall back to direct Q&A mode.
            prompt = IntentRecognizer.build_direct_qa_prompt(
                request.question,
                intent_info,
                user_level_info
            )
            logger.info(f"直接问答Prompt构建完成，长度: {len(prompt)} 字符")
            logger.info("未检索到相关内容，使用直接问答模式")
        
        logger.debug(f"构建的Prompt: {prompt}")
        
        # 7. Call the large language model.
        logger.info("开始调用大语言模型...")
        answer = call_llm_api(prompt)
        logger.info(f"大语言模型调用完成，回答长度: {len(answer)} 字符")
        logger.debug(f"生成的回答: {answer}")
        
        # 8. Assemble the response payload.
        logger.info("开始整理返回结果...")
        text_source_info = []
        for i, (chunk, score, source, file_type, pos_info) in enumerate(zip(text_chunks, text_scores, text_sources, text_file_types, text_position_info)):
            text_source_info.append(SourceInfo(
                index=i + 1,
                content=chunk[:200] + "..." if len(chunk) > 200 else chunk,
                score=float(score),
                source=source,
                file_type=file_type,
                page_num=pos_info.get('page_num'),
                slide_num=pos_info.get('slide_num'),
                paragraph_num=pos_info.get('paragraph_num')
            ))
        
        # Build the knowledge-base retrieval evidence payload.
        evidence_data = {
            "text_evidence": [],
            "image_evidence": [],
            "summary": "=== 知识库检索证据 ===",
            "intent_info": intent_info,
            "user_level_info": user_level_info
        }
        
        # Add text evidence.
        if text_chunks:
            for i, (chunk, score, source, pos_info) in enumerate(zip(text_chunks, text_scores, text_sources, text_position_info)):
                evidence_data["text_evidence"].append({
                    "index": i + 1,
                    "similarity": float(score),
                    "source": source,
                    "content": chunk[:500] + "..." if len(chunk) > 500 else chunk,
                    "page_num": pos_info.get('page_num'),
                    "slide_num": pos_info.get('slide_num'),
                    "paragraph_num": pos_info.get('paragraph_num')
                })
        
        # Add image evidence.
        if image_paths:
            for i, (img_path, score, source, ts_info) in enumerate(zip(image_paths, image_scores, image_sources, image_timestamp_info)):
                evidence_data["image_evidence"].append({
                    "index": i + 1,
                    "similarity": float(score),
                    "source": source,
                    "path": img_path,
                    "timestamp": ts_info.get('timestamp'),
                    "frame_index": ts_info.get('frame_index')
                })
        
        # Persist the QA pair to the database (best effort; failures only logged).
        if DB_AVAILABLE and request.conversation_id and answer.strip():
            try:
                msg_id = add_message_to_db(
                    conversation_id=request.conversation_id,
                    chat_type="rag_chat",
                    query=request.question,
                    response=answer,
                    meta_data=evidence_data  # store retrieval evidence in the meta_data field
                )
                logger.info(f"QA内容已保存到数据库，消息ID: {msg_id}")
                logger.info(f"检索证据包含 {len(evidence_data['text_evidence'])} 个文本证据, {len(evidence_data['image_evidence'])} 个图片证据")
            except Exception as db_error:
                logger.error(f"保存QA内容到数据库失败: {db_error}")
                import traceback
                logger.error(f"数据库错误详情: {traceback.format_exc()}")
        elif not DB_AVAILABLE:
            logger.warning("数据库模块不可用，QA内容未保存")
        elif not request.conversation_id:
            logger.info("未提供conversation_id，QA内容未保存")
        elif not answer.strip():
            logger.info("回答内容为空，QA内容未保存")
        
        image_source_info = []
        for i, (img_path, score, source, file_type, ts_info) in enumerate(zip(image_paths, image_scores, image_sources, image_file_types, image_timestamp_info)):
            image_source_info.append(ImageSourceInfo(
                index=i + 1,
                image_path=img_path,
                filename=os.path.basename(img_path),
                score=float(score),
                source=source,
                file_type=file_type,
                timestamp=ts_info.get('timestamp'),
                frame_index=ts_info.get('frame_index')
            ))
        
        # Collect the distinct knowledge-base files referenced by any source.
        knowledge_files = list(set(text_sources + image_sources))
        logger.info(f"知识库文件列表: {knowledge_files}")
        
        response_time = time.time() - start_time
        logger.info(f"RAG问答处理完成，总耗时: {response_time:.2f}秒")
        
        return RAGResponse(
            answer=answer,
            text_sources=text_source_info,
            image_sources=image_source_info,
            knowledge_files=knowledge_files,
            intent_info=IntentInfo(**intent_info),
            user_level_info=UserLevelInfo(**user_level_info)
        )
        
    except Exception as e:
        logger.error(f"RAG问答处理失败: {str(e)}")
        import traceback
        logger.error(f"详细错误信息: {traceback.format_exc()}")
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # NOTE(review): this tears down the shared "default" Milvus connection,
        # which may affect concurrent requests using the same alias — confirm.
        if "default" in connections.list_connections():
            connections.disconnect("default")
            logger.info("已断开Milvus连接")

@router.post("/ask_stream", summary="流式多模态RAG问答")
async def ask_question_stream(request: Request, data: QuestionRequest):
    """Streaming multimodal RAG question-answering endpoint (SSE).

    Same pipeline as /ask, but yields a knowledge-base evidence header,
    the LLM answer chunk by chunk, and a final "[DONE]" marker via
    Server-Sent Events. QA pairs are persisted best-effort after the
    stream completes.
    """
    from app.core.logging import get_logger
    logger = get_logger(__name__)
    
    logger.info(f"开始处理流式RAG问答请求: {data.question}")
    logger.info(f"请求参数: top_k={data.top_k}, include_images={data.include_images}")
    
    def event_gen():
        """Sync generator producing SSE payloads: evidence, answer chunks, [DONE]."""
        start_time = time.time()
        try:
            # Connect to Milvus.
            logger.info("连接Milvus数据库...")
            connect_milvus()
            logger.info("Milvus连接成功")
            
            # 1. Embed the user question into a text query vector.
            logger.info("开始向量化用户问题...")
            query_text_vector = call_aliyun_embedding_api(data.question)
            logger.info(f"问题向量化完成，向量维度: {query_text_vector.shape}")
            
            # 2. Text vector retrieval (failures fall through to direct Q&A).
            text_chunks = []
            text_scores = []
            text_sources = []
            text_file_types = []
            text_position_info = []
            
            try:
                logger.info(f"开始文本向量检索，top_k={data.top_k}, knowledge_base={data.knowledge_base}...")
                text_chunks, text_scores, text_sources, text_file_types, text_position_info = search_text_vectors(
                    query_text_vector.tolist(), data.top_k, data.knowledge_base
                )
                logger.info(f"文本检索完成，找到 {len(text_chunks)} 个相关文本块")
                
                # Smart-filter text chunks: length, similarity, deduplication.
                filtered_text_chunks = []
                filtered_text_scores = []
                filtered_text_sources = []
                filtered_text_file_types = []
                filtered_text_position_info = []
                
                for i, (chunk, score, source, file_type, pos_info) in enumerate(zip(text_chunks, text_scores, text_sources, text_file_types, text_position_info)):
                    # Drop chunks that are too short.
                    if len(chunk.strip()) < 10:  # relaxed length requirement
                        logger.info(f"过滤文本块 {i+1}: 内容太短 ({len(chunk)} 字符)")
                        continue
                    
                    # Drop low-similarity results.
                    if score < 0.5:  # lower threshold than /ask's 0.6
                        logger.info(f"过滤文本块 {i+1}: 相似度过低 ({score:.4f})")
                        continue
                    
                    # Drop near-duplicate content.
                    is_duplicate = False
                    for existing_chunk in filtered_text_chunks:
                        if _calculate_text_similarity(chunk, existing_chunk) > 0.7:
                            is_duplicate = True
                            logger.info(f"过滤文本块 {i+1}: 重复内容")
                            break
                    
                    if not is_duplicate:
                        filtered_text_chunks.append(chunk)
                        filtered_text_scores.append(score)
                        filtered_text_sources.append(source)
                        filtered_text_file_types.append(file_type)
                        filtered_text_position_info.append(pos_info)
                        logger.info(f"保留文本块 {i+1}: 相似度={score:.4f} >= 0.5, 长度={len(chunk)}")
                
                # Replace results with the filtered versions.
                text_chunks = filtered_text_chunks
                text_scores = filtered_text_scores
                text_sources = filtered_text_sources
                text_file_types = filtered_text_file_types
                text_position_info = filtered_text_position_info
                
                logger.info(f"相似度过滤后，保留 {len(text_chunks)} 个高质量文本块")
                
            except Exception as e:
                logger.warning(f"文本向量检索失败，跳过知识库检索: {e}")
                logger.info("将直接调用大模型进行回答")
            
            # 3. Image vector retrieval.
            # NOTE(review): unlike /ask, this searches images with the text
            # query vector directly (no generate_image_query_vector) — confirm
            # whether the two endpoints should be consistent.
            image_chunks = []
            image_scores = []
            image_sources = []
            image_file_types = []
            image_timestamp_info = []
            
            if data.include_images:
                try:
                    logger.info(f"开始图片向量检索，top_k={data.top_k}, knowledge_base={data.knowledge_base}...")
                    image_chunks, image_scores, image_sources, image_file_types, image_timestamp_info = search_image_vectors(
                        query_text_vector.tolist(), data.top_k, data.knowledge_base
                    )
                    logger.info(f"图片检索完成，找到 {len(image_chunks)} 个相关图片")
                    
                    # Smart-filter image chunks: similarity and deduplication.
                    filtered_image_chunks = []
                    filtered_image_scores = []
                    filtered_image_sources = []
                    filtered_image_file_types = []
                    filtered_image_timestamp_info = []
                    
                    for i, (chunk, score, source, file_type, ts_info) in enumerate(zip(image_chunks, image_scores, image_sources, image_file_types, image_timestamp_info)):
                        # Drop low-similarity results.
                        if score < 0.5:  # relaxed similarity threshold
                            logger.info(f"过滤图片块 {i+1}: 相似度过低 ({score:.4f})")
                            continue
                        
                        # Drop duplicate entries.
                        is_duplicate = False
                        for existing_chunk in filtered_image_chunks:
                            if existing_chunk == chunk:
                                is_duplicate = True
                                logger.info(f"过滤图片块 {i+1}: 重复来源")
                                break
                        
                        if not is_duplicate:
                            filtered_image_chunks.append(chunk)
                            filtered_image_scores.append(score)
                            filtered_image_sources.append(source)
                            filtered_image_file_types.append(file_type)
                            filtered_image_timestamp_info.append(ts_info)
                            logger.info(f"保留图片块 {i+1}: 相似度={score:.4f} >= 0.5")
                    
                    # Replace results with the filtered versions.
                    image_chunks = filtered_image_chunks
                    image_scores = filtered_image_scores
                    image_sources = filtered_image_sources
                    image_file_types = filtered_image_file_types
                    image_timestamp_info = filtered_image_timestamp_info
                    
                    logger.info(f"相似度过滤后，保留 {len(image_chunks)} 个高质量图片块")
                    
                except Exception as e:
                    logger.warning(f"图片向量检索失败，跳过图片检索: {e}")
            else:
                logger.info("图片向量检索已禁用")
            
            # Log image retrieval details.
            for i, (chunk, score, source, file_type) in enumerate(zip(image_chunks, image_scores, image_sources, image_file_types)):
                logger.info(f"图片块 {i+1}: 相似度={score:.4f}, 来源={source}, 类型={file_type}")
            
            # 4. Load conversation history for multi-turn dialogue.
            conversation_history = []
            if DB_AVAILABLE and data.conversation_id:
                conversation_history = get_conversation_history(data.conversation_id, limit=5)
                logger.info(f"获取到 {len(conversation_history)} 条对话历史")
                if conversation_history:
                    logger.info("对话历史:")
                    for i, msg in enumerate(conversation_history[-4:], 1):  # log only the last 4
                        role = "用户" if msg["role"] == "user" else "助手"
                        content = msg["content"][:50] + "..." if len(msg["content"]) > 50 else msg["content"]
                        logger.info(f"  {i}. {role}: {content}")
            
            # 5. Intent recognition and user-level estimation.
            logger.info("开始意图识别和用户水平判断...")
            intent_info = IntentRecognizer.recognize_intent(data.question)
            user_level_info = IntentRecognizer.recognize_user_level(data.question, conversation_history)
            
            logger.info(f"识别结果 - 意图: {intent_info['intent_type']}, 用户水平: {user_level_info['user_level']}")
            logger.info(f"意图描述: {intent_info['description']}")
            logger.info(f"用户水平描述: {user_level_info['description']}")
            
            # 6. Build the adaptive prompt.
            logger.info("开始构建自适应prompt...")
            
            # Check whether anything was retrieved at all.
            has_retrieved_content = len(text_chunks) > 0 or len(image_chunks) > 0
            
            if has_retrieved_content:
                # With retrieved content, use RAG mode.
                prompt = IntentRecognizer.build_adaptive_prompt(
                    data.question, 
                    text_chunks, 
                    intent_info, 
                    user_level_info
                )
                logger.info(f"RAG模式Prompt构建完成，长度: {len(prompt)} 字符")
            else:
                # Without retrieved content, use direct Q&A mode.
                prompt = IntentRecognizer.build_direct_qa_prompt(
                    data.question,
                    intent_info,
                    user_level_info
                )
                logger.info(f"直接问答模式Prompt构建完成，长度: {len(prompt)} 字符")
            
            # 7. Emit the knowledge-base evidence header first (when any was retrieved).
            if has_retrieved_content:
                logger.info("返回知识库检索证据...")
                evidence_text = "=== 知识库检索证据 ===\n"
                
                # Text evidence.
                if text_chunks:
                    evidence_text += "\n【文本证据】\n"
                    for i, (chunk, score, source, pos_info) in enumerate(zip(text_chunks, text_scores, text_sources, text_position_info)):
                        # Build the position-info string.
                        position_info = ""
                        if pos_info.get('page_num'):
                            position_info += f"页码：{pos_info['page_num']}"
                        if pos_info.get('slide_num'):
                            position_info += f"  幻灯片：{pos_info['slide_num']}"
                        if pos_info.get('paragraph_num'):
                            position_info += f"  段落：{pos_info['paragraph_num']}"
                        
                        evidence_text += f"\n证据 {i+1}: 相似度 {score:.4f}\n来源: {source}\n{position_info}\n内容: {chunk[:200]}...\n"
                
                # Image evidence.
                if image_chunks:
                    evidence_text += "\n【图片证据】\n"
                    for i, (chunk, score, source, ts_info) in enumerate(zip(image_chunks, image_scores, image_sources, image_timestamp_info)):
                        # Build the timestamp-info string.
                        timestamp_info = ""
                        if ts_info.get('timestamp'):
                            timestamp_info += f"时间戳：{ts_info['timestamp']:.2f}秒"
                        if ts_info.get('frame_index'):
                            timestamp_info += f"  帧索引：{ts_info['frame_index']}"
                        
                        evidence_text += f"\n图片 {i+1}: 相似度 {score:.4f}\n来源: {source}\n{timestamp_info}\n描述: {chunk[:200]}...\n"
                
                # Yield the evidence block followed by the answer header.
                yield f"{evidence_text}\n\n=== AI回答 ===\n\n"
            else:
                logger.info("无知识库检索内容，直接进行AI回答")
                yield "=== AI回答 ===\n\n"
            
            # 8. Stream the LLM response.
            logger.info("开始流式调用大语言模型...")
            chunk_count = 0
            answer_content = ""  # accumulates the full answer text for persistence
            try:
                for chunk in call_llm_stream(prompt):
                    chunk_count += 1
                    if chunk_count % 10 == 0:  # log every 10th chunk
                        logger.debug(f"已生成 {chunk_count} 个文本块")
                    # EventSourceResponse adds the "data:" prefix itself.
                    # Skip empty chunks to avoid emitting blank lines.
                    if chunk.strip():
                        answer_content += chunk  # accumulate the full answer
                        yield f"{chunk}"
                
                # Send the end-of-stream marker.
                yield "[DONE]"
                
                response_time = time.time() - start_time
                logger.info(f"流式RAG问答处理完成，总耗时: {response_time:.2f}秒，生成 {chunk_count} 个文本块")
                
                # Build the knowledge-base retrieval evidence payload.
                evidence_data = {
                    "text_evidence": [],
                    "image_evidence": [],
                    "summary": "=== 知识库检索证据 ===",
                    "intent_info": intent_info,
                    "user_level_info": user_level_info
                }
                
                # Add text evidence.
                if text_chunks:
                    for i, (chunk, score, source, pos_info) in enumerate(zip(text_chunks, text_scores, text_sources, text_position_info)):
                        evidence_data["text_evidence"].append({
                            "index": i + 1,
                            "similarity": float(score),
                            "source": source,
                            "content": chunk[:500] + "..." if len(chunk) > 500 else chunk,
                            "page_num": pos_info.get('page_num'),
                            "slide_num": pos_info.get('slide_num'),
                            "paragraph_num": pos_info.get('paragraph_num')
                        })
                
                # Add image evidence.
                if image_chunks:
                    for i, (chunk, score, source, ts_info) in enumerate(zip(image_chunks, image_scores, image_sources, image_timestamp_info)):
                        evidence_data["image_evidence"].append({
                            "index": i + 1,
                            "similarity": float(score),
                            "source": source,
                            "description": chunk[:500] + "..." if len(chunk) > 500 else chunk,
                            "timestamp": ts_info.get('timestamp'),
                            "frame_index": ts_info.get('frame_index')
                        })
                
                # Persist the QA pair to the database (best effort; failures only logged).
                if DB_AVAILABLE and data.conversation_id and answer_content.strip():
                    try:
                        logger.info("=== 开始保存QA内容到数据库 ===")
                        msg_id = add_message_to_db(
                            conversation_id=data.conversation_id,
                            chat_type="rag_chat",
                            query=data.question,
                            response=answer_content,
                            meta_data=evidence_data  # store retrieval evidence in the meta_data field
                        )
                        logger.info(f"QA内容已保存到数据库，消息ID: {msg_id}")
                        logger.info(f"检索证据包含 {len(evidence_data['text_evidence'])} 个文本证据, {len(evidence_data['image_evidence'])} 个图片证据")
                        
                        # Immediately check database status (diagnostic).
                        logger.info("=== 检查数据库状态 ===")
                        check_database_status()
                        
                        # Fetch recent message records (diagnostic).
                        logger.info("=== 获取所有消息记录 ===")
                        get_all_messages(limit=10)
                        
                        logger.info("=== 数据库操作完成 ===")
                    except Exception as db_error:
                        logger.error(f"保存QA内容到数据库失败: {db_error}")
                        import traceback
                        logger.error(f"数据库错误详情: {traceback.format_exc()}")
                elif not DB_AVAILABLE:
                    logger.warning("数据库模块不可用，QA内容未保存")
                elif not data.conversation_id:
                    logger.info("未提供conversation_id，QA内容未保存")
                elif not answer_content.strip():
                    logger.info("回答内容为空，QA内容未保存")
                    
            except Exception as e:
                logger.error(f"流式LLM调用过程中发生错误: {e}")
                import traceback
                logger.error(f"详细错误信息: {traceback.format_exc()}")
                yield f"[ERROR] 流式LLM调用失败: {str(e)}\n\n"
            
            # NOTE(review): on success this duplicates the completion log
            # already emitted inside the try block above — confirm intent.
            response_time = time.time() - start_time
            logger.info(f"流式RAG问答处理完成，总耗时: {response_time:.2f}秒，生成 {chunk_count} 个文本块")
                
        except Exception as e:
            logger.error(f"流式RAG问答处理失败: {str(e)}")
            import traceback
            logger.error(f"详细错误信息: {traceback.format_exc()}")
            yield f"[ERROR] {str(e)}\n\n"
        finally:
            # Always release the "default" Milvus connection.
            if "default" in connections.list_connections():
                connections.disconnect("default")
                logger.info("已断开Milvus连接")
    
    return EventSourceResponse(event_gen())

@router.get("/stats", response_model=RAGStats, summary="获取RAG统计信息")
async def get_rag_stats():
    """Return aggregate RAG usage statistics.

    Currently a placeholder: no real aggregation is performed yet, so
    fixed default values are returned.
    """
    try:
        # TODO: replace these hard-coded placeholder values with real metrics.
        placeholder = RAGStats(
            total_questions=0,
            total_answers=0,
            avg_response_time=0.0,
            success_rate=100.0,
        )
        return placeholder
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))

@router.get("/knowledge-stats", summary="获取知识库统计信息")
async def get_knowledge_stats():
    """Return entity counts for the text and image Milvus collections.

    Connects to Milvus, records `num_entities` for each collection that
    exists, and always releases the default connection afterwards.
    """
    try:
        connect_milvus()

        # Map each stats key to the Milvus collection it is sourced from.
        collection_names = {
            "text_entities": settings.MILVUS_COLLECTION,
            "image_entities": settings.MILVUS_COLLECTION_IMAGE,
        }

        stats = {}
        for key, collection_name in collection_names.items():
            if utility.has_collection(collection_name):
                stats[key] = Collection(collection_name).num_entities

        return stats

    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        if "default" in connections.list_connections():
            connections.disconnect("default")