# multimodal_rag_api.py

import os
import json
import requests
import numpy as np
from typing import List, Dict, Any, Optional
from fastapi import FastAPI, Body, Request, HTTPException
from sse_starlette.sse import EventSourceResponse
from pydantic import BaseModel
from sentence_transformers import SentenceTransformer
from pymilvus import connections, Collection, utility
import config

# FastAPI application: multimodal RAG Q&A with two-way (text + image) recall.
app = FastAPI(title="多模态RAG问答API", description="支持文本和图片两路召回的知识库问答系统")

class QuestionRequest(BaseModel):
    """Request body for the RAG Q&A endpoints."""
    question: str
    conversation_id: Optional[str] = None
    include_images: bool = True  # whether to include image recall results
    top_k: int = 10  # max hits requested from each Milvus collection

class RAGResponse(BaseModel):
    """Response body: the LLM answer plus retrieval provenance."""
    answer: str
    text_sources: List[Dict[str, Any]]  # per-hit text chunk metadata
    image_sources: List[Dict[str, Any]]  # per-hit image metadata
    knowledge_files: List[str]  # deduplicated source files behind the hits

# Lazily-initialized global model instances (one per process).
_text_model = None
_multimodal_model = None

def get_text_model():
    """Return the shared text embedding model, loading it on first use."""
    global _text_model
    if _text_model is not None:
        return _text_model
    _text_model = SentenceTransformer(config.EMBEDDING_MODEL)
    return _text_model

def get_multimodal_model():
    """Return the shared multimodal embedding model, loading it on first use.

    NOTE: the real multimodal embedding API still needs to be wired in; a
    SentenceTransformer is currently used as a placeholder.
    """
    global _multimodal_model
    if _multimodal_model is not None:
        return _multimodal_model
    _multimodal_model = SentenceTransformer(config.MULTIMODAL_EMBEDDING_MODEL)
    return _multimodal_model

def connect_milvus():
    """Open the default Milvus connection using host/port from config."""
    connections.connect("default", host=config.MILVUS_HOST, port=config.MILVUS_PORT)

def search_text_vectors(query_vector: List[float], top_k: int = 10):
    """Search the text collection for nearest neighbors of `query_vector`.

    Args:
        query_vector: query embedding (inner-product metric, so it should
            be normalized by the caller).
        top_k: maximum number of hits to return.

    Returns:
        Four parallel lists: (texts, scores, sources, file_types). All
        four are empty when the collection does not exist.
    """
    if not utility.has_collection(config.MILVUS_COLLECTION):
        # Bug fix: return four empty lists so callers that unpack four
        # values don't crash when the collection is missing (the original
        # returned only two).
        return [], [], [], []

    collection = Collection(config.MILVUS_COLLECTION)
    collection.load()

    search_params = {
        "metric_type": "IP",
        "params": {"nprobe": 10},
    }

    results = collection.search(
        data=[query_vector],
        anns_field="vector",
        param=search_params,
        limit=top_k,
        output_fields=["text", "source", "file_type"]
    )

    # One query vector in, so only the first result set is relevant.
    hits = results[0]
    texts = [hit.entity.get('text') for hit in hits]
    scores = [hit.distance for hit in hits]
    sources = [hit.entity.get('source') for hit in hits]
    file_types = [hit.entity.get('file_type') for hit in hits]

    return texts, scores, sources, file_types

def search_image_vectors(query_vector: List[float], top_k: int = 10):
    """Search the image collection for nearest neighbors of `query_vector`.

    Args:
        query_vector: query embedding (inner-product metric, so it should
            be normalized by the caller).
        top_k: maximum number of hits to return.

    Returns:
        Four parallel lists: (image_paths, scores, sources, file_types).
        All four are empty when the collection does not exist.
    """
    if not utility.has_collection(config.MILVUS_COLLECTION_IMAGE):
        # Bug fix: return four empty lists so callers that unpack four
        # values don't crash when the collection is missing (the original
        # returned only two).
        return [], [], [], []

    collection = Collection(config.MILVUS_COLLECTION_IMAGE)
    collection.load()

    search_params = {
        "metric_type": "IP",
        "params": {"nprobe": 10},
    }

    results = collection.search(
        data=[query_vector],
        anns_field="vector",
        param=search_params,
        limit=top_k,
        output_fields=["image_path", "source", "file_type"]
    )

    # One query vector in, so only the first result set is relevant.
    hits = results[0]
    image_paths = [hit.entity.get('image_path') for hit in hits]
    scores = [hit.distance for hit in hits]
    sources = [hit.entity.get('source') for hit in hits]
    file_types = [hit.entity.get('file_type') for hit in hits]

    return image_paths, scores, sources, file_types

def call_llm_api(prompt: str) -> str:
    """Send one non-streaming chat completion request to the LLM.

    Args:
        prompt: user message, already augmented with knowledge-base content.

    Returns:
        The model's answer text, or an error description string if the
        request or response parsing fails (this function never raises).
    """
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {config.LLM_API_KEY}",
    }

    request_body = {
        "model": config.LLM_MODEL,
        "messages": [
            {"role": "system", "content": "你是一个智能助手。请根据提供的知识库内容来回答用户的问题。"},
            {"role": "user", "content": prompt},
        ],
        "stream": False,
    }

    try:
        resp = requests.post(config.LLM_API_URL, headers=request_headers, json=request_body, timeout=60)
        resp.raise_for_status()
        body = resp.json()
        # OpenAI-compatible shape: choices[0].message.content.
        first_choice = body.get("choices", [{}])[0]
        return first_choice.get("message", {}).get("content", "")
    except Exception as e:
        # Errors are reported in-band as the answer string.
        return f"调用大语言模型时发生错误: {str(e)}"

def call_llm_stream(prompt: str):
    """Yield answer fragments from the LLM's SSE streaming endpoint.

    Args:
        prompt: user message, already augmented with knowledge-base content.

    Yields:
        Content fragments as they arrive; on transport failure a single
        in-band error string is yielded instead of raising.
    """
    request_headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {config.LLM_API_KEY}",
    }

    request_body = {
        "model": config.LLM_MODEL,
        "stream": True,
        "messages": [
            {"role": "system", "content": "你是一个智能助手。请根据提供的知识库内容来回答用户的问题。"},
            {"role": "user", "content": prompt},
        ],
    }

    try:
        with requests.post(config.LLM_API_URL, headers=request_headers, json=request_body, stream=True, timeout=120) as resp:
            resp.raise_for_status()
            for raw_line in resp.iter_lines(decode_unicode=True):
                if not raw_line or not raw_line.strip():
                    continue
                # Strip the SSE "data:" prefix if present.
                payload_text = raw_line[5:].strip() if raw_line.startswith("data:") else raw_line
                if payload_text == "[DONE]":
                    break
                try:
                    event = json.loads(payload_text)
                    first_choice = event.get("choices", [{}])[0]
                    # Prefer streaming delta, then full message, then a raw
                    # "data" field for non-OpenAI-shaped payloads.
                    fragment = (
                        first_choice.get("delta", {}).get("content")
                        or first_choice.get("message", {}).get("content")
                        or event.get("data", "")
                    )
                    if fragment:
                        yield fragment
                except Exception:
                    # Skip malformed or unexpected lines silently.
                    continue
    except Exception as e:
        yield f"\n发生错误: {e}\n"

def build_prompt_with_sources(question: str, text_sources: List[str], image_sources: List[str]) -> str:
    """Assemble the LLM prompt from retrieved text chunks and image paths.

    Args:
        question: the user's question.
        text_sources: retrieved text chunks quoted verbatim in the prompt.
        image_sources: file paths of retrieved images; only the base file
            names are surfaced to the model (image content is not inlined).

    Returns:
        A single prompt string combining the knowledge context, the
        question, and the answering instructions.
    """
    prompt_parts = []

    # Numbered list of retrieved text chunks.
    if text_sources:
        prompt_parts.append("文本知识库内容:")
        for i, text in enumerate(text_sources, 1):
            prompt_parts.append(f"{i}. {text}")

    # Related image file names.
    if image_sources:
        prompt_parts.append("\n相关图片文件:")
        for i, img_path in enumerate(image_sources, 1):
            filename = os.path.basename(img_path)
            # Bug fix: the original emitted the literal "(unknown)" instead
            # of the computed file name.
            prompt_parts.append(f"{i}. {filename}")

    # The question and the answering instructions come last.
    prompt_parts.append(f"\n用户问题: {question}")
    prompt_parts.append("\n请根据上述知识库内容回答用户问题。如果知识库内容与问题无关，请说明情况并根据自己的知识回答。")

    return "\n".join(prompt_parts)

@app.post("/ask", response_model=RAGResponse, summary="多模态RAG问答")
async def ask_question(request: QuestionRequest):
    """多模态RAG问答接口"""
    try:
        connect_milvus()
        
        # 1. 文本向量化查询
        text_model = get_text_model()
        query_text_vector = text_model.encode([request.question], normalize_embeddings=True)[0]
        
        # 2. 文本向量召回
        text_chunks, text_scores, text_sources, text_file_types = search_text_vectors(
            query_text_vector.tolist(), request.top_k
        )
        
        # 3. 图片向量召回（如果启用）
        image_sources = []
        image_paths = []
        if request.include_images:
            try:
                multimodal_model = get_multimodal_model()
                # 这里需要根据实际的多模态模型API进行调用
                # 暂时使用随机向量作为占位符
                query_image_vector = np.random.rand(config.IMAGE_VECTOR_DIMENSION).astype(np.float32)
                
                image_paths, image_scores, image_sources, image_file_types = search_image_vectors(
                    query_image_vector.tolist(), request.top_k
                )
            except Exception as e:
                print(f"图片向量召回失败: {e}")
        
        # 4. 构建prompt
        prompt = build_prompt_with_sources(request.question, text_chunks, image_paths)
        
        # 5. 调用大语言模型
        answer = call_llm_api(prompt)
        
        # 6. 整理返回结果
        text_source_info = []
        for i, (chunk, score, source, file_type) in enumerate(zip(text_chunks, text_scores, text_sources, text_file_types)):
            text_source_info.append({
                "index": i + 1,
                "content": chunk[:200] + "..." if len(chunk) > 200 else chunk,
                "score": float(score),
                "source": source,
                "file_type": file_type
            })
        
        image_source_info = []
        for i, (img_path, score, source, file_type) in enumerate(zip(image_paths, image_scores, image_sources, image_file_types)):
            image_source_info.append({
                "index": i + 1,
                "image_path": img_path,
                "filename": os.path.basename(img_path),
                "score": float(score),
                "source": source,
                "file_type": file_type
            })
        
        # 获取所有知识库文件
        knowledge_files = list(set(text_sources + image_sources))
        
        return RAGResponse(
            answer=answer,
            text_sources=text_source_info,
            image_sources=image_source_info,
            knowledge_files=knowledge_files
        )
        
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        if "default" in connections.list_connections():
            connections.disconnect("default")

@app.post("/ask_stream", summary="流式多模态RAG问答")
async def ask_question_stream(request: Request, data: QuestionRequest):
    """流式多模态RAG问答接口"""
    def event_gen():
        try:
            connect_milvus()
            
            # 1. 文本向量化查询
            text_model = get_text_model()
            query_text_vector = text_model.encode([data.question], normalize_embeddings=True)[0]
            
            # 2. 文本向量召回
            text_chunks, text_scores, text_sources, text_file_types = search_text_vectors(
                query_text_vector.tolist(), data.top_k
            )
            
            # 3. 图片向量召回
            image_sources = []
            image_paths = []
            if data.include_images:
                try:
                    multimodal_model = get_multimodal_model()
                    query_image_vector = np.random.rand(config.IMAGE_VECTOR_DIMENSION).astype(np.float32)
                    
                    image_paths, image_scores, image_sources, image_file_types = search_image_vectors(
                        query_image_vector.tolist(), data.top_k
                    )
                except Exception as e:
                    print(f"图片向量召回失败: {e}")
            
            # 4. 构建prompt
            prompt = build_prompt_with_sources(data.question, text_chunks, image_paths)
            
            # 5. 流式调用大语言模型
            for chunk in call_llm_stream(prompt):
                yield chunk
                
        except Exception as e:
            yield f"data: [ERROR] {str(e)}\n"
        finally:
            if "default" in connections.list_connections():
                connections.disconnect("default")
    
    return EventSourceResponse(event_gen())

@app.get("/health", summary="健康检查")
async def health_check():
    """API健康检查"""
    return {"status": "healthy", "message": "多模态RAG问答API运行正常"}

@app.get("/stats", summary="获取知识库统计信息")
async def get_knowledge_stats():
    """获取知识库统计信息"""
    try:
        connect_milvus()
        
        stats = {}
        
        # 文本集合统计
        if utility.has_collection(config.MILVUS_COLLECTION):
            text_collection = Collection(config.MILVUS_COLLECTION)
            stats["text_entities"] = text_collection.num_entities
        
        # 图片集合统计
        if utility.has_collection(config.MILVUS_COLLECTION_IMAGE):
            image_collection = Collection(config.MILVUS_COLLECTION_IMAGE)
            stats["image_entities"] = image_collection.num_entities
        
        return stats
        
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        if "default" in connections.list_connections():
            connections.disconnect("default")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run("multimodal_rag_api:app", host="0.0.0.0", port=8001, reload=True) 