"""
RAG服务模块
实现检索增强生成的核心功能，支持本地和OpenAI模型
"""
import os
import logging
import torch
from typing import List, Dict, Any, Optional, Union, Literal
from pathlib import Path
import uuid

from langchain.chains import RetrievalQA
from langchain_community.chat_models import ChatOpenAI
from langchain_community.llms import HuggingFacePipeline, LlamaCpp
from langchain.prompts import PromptTemplate
from langchain.chains.question_answering import load_qa_chain
from langchain.vectorstores import VectorStore
from langchain.embeddings import HuggingFaceEmbeddings
from transformers import (
    AutoModelForCausalLM,
    AutoTokenizer,
    pipeline,
    BitsAndBytesConfig
)
from pydantic import BaseModel, Field

# Optional dependency: llama-cpp-python enables loading GGUF-format models.
# BUGFIX: the module-level `logger` is only defined further down in this
# file, so referencing it here raised a NameError at import time whenever
# llama_cpp was missing. Use logging.getLogger(__name__) directly instead
# (it returns the same logger object).
HAS_LLAMA_CPP = False
try:
    from llama_cpp import Llama
    HAS_LLAMA_CPP = True
except ImportError:
    logging.getLogger(__name__).warning("未找到llama-cpp-python，GGUF模型将不可用")
    Llama = None

from config import settings, ModelType
from document_processor import DocumentChunk, DocumentProcessor
from vector_store import VectorStoreManager, VectorStoreConfig

# Configure logging and create the module-level logger used throughout this file
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

class RAGConfig(BaseModel):
    """Configuration for the RAG service.

    Groups generation/retrieval parameters with loading options for the
    three supported backends: OpenAI API, local Hugging Face models, and
    GGUF models served through llama-cpp-python.
    """
    # Model / generation configuration
    # NOTE(review): _get_llm selects the backend from the global
    # `settings.model_type` / `settings.openai_model`, so these two fields
    # appear to be unused there — confirm which source is authoritative.
    model_type: ModelType = ModelType.OPENAI
    model_name: str = "gpt-3.5-turbo"
    temperature: float = 0.7
    max_tokens: int = 1000
    top_k: int = 4               # number of chunks retrieved per query
    chunk_size: int = 1000       # size of each document chunk
    chunk_overlap: int = 200     # overlap between consecutive chunks
    
    # Local (Hugging Face) model configuration
    local_model_name: str = "gpt2"
    local_embedding_model: str = "sentence-transformers/all-MiniLM-L6-v2"
    # Evaluated once at class-definition time, not per instance
    device: str = "cuda" if torch.cuda.is_available() else "cpu"
    
    # 4-bit quantization (bitsandbytes) options for local models
    load_in_4bit: bool = True
    bnb_4bit_quant_type: str = "nf4"
    bnb_4bit_compute_dtype: str = "float16"  # must name a torch dtype attribute
    bnb_4bit_use_double_quant: bool = True
    
    # GGUF (llama.cpp) model configuration
    gguf_model_path: str = ""
    gguf_n_ctx: int = 4096
    gguf_n_threads: int = 0  # 0 means auto-select thread count
    gguf_n_gpu_layers: int = 0  # 0 means CPU only
    
    class Config:
        arbitrary_types_allowed = True  # allow arbitrary types (e.g. the ModelType enum)

class RAGService:
    """Retrieval-augmented generation (RAG) service.

    Wires together document processing, a vector store, and a language
    model (OpenAI API, local Hugging Face model, or GGUF via llama.cpp)
    to ingest documents and answer questions grounded in them.
    """
    
    def __init__(self, config: Optional[RAGConfig] = None, vector_store_config: Optional[VectorStoreConfig] = None):
        """
        Initialize the RAG service.
        
        Args:
            config: RAG configuration; defaults to ``RAGConfig()``.
            vector_store_config: Vector store configuration; defaults to
                ``VectorStoreConfig()``.
        """
        self.config = config or RAGConfig()
        self.vector_store_manager = VectorStoreManager(vector_store_config or VectorStoreConfig())
        self.document_processor = DocumentProcessor(
            chunk_size=self.config.chunk_size,
            chunk_overlap=self.config.chunk_overlap
        )
        # Load the LLM eagerly so configuration errors surface at startup
        self.llm = self._get_llm()
    
    def _get_llm(self):
        """
        Build the language model selected by the global settings.
        
        NOTE(review): the backend choice and OpenAI model name come from the
        global ``settings``, while sampling parameters (temperature,
        max_tokens) come from ``self.config`` — whose own ``model_type`` /
        ``model_name`` fields are ignored here. Confirm this split is
        intentional.
        
        Returns:
            A LangChain-compatible LLM instance.
        
        Raises:
            ImportError: GGUF backend requested but llama-cpp-python missing.
            FileNotFoundError: configured GGUF model file cannot be found.
        """
        try:
            if settings.model_type == ModelType.OPENAI:
                return ChatOpenAI(
                    model_name=settings.openai_model,
                    temperature=self.config.temperature,
                    max_tokens=self.config.max_tokens,
                    openai_api_key=settings.openai_api_key,
                    openai_api_base=settings.openai_api_base
                )
            elif settings.model_type == ModelType.GGUF:
                # GGUF models require the optional llama-cpp-python package
                if not HAS_LLAMA_CPP:
                    raise ImportError("需要安装llama-cpp-python以支持GGUF模型: pip install llama-cpp-python")
                
                model_path = self.config.gguf_model_path
                logger.info(f"正在加载GGUF模型: {model_path}")
                
                # Fall back to the local "models" directory when the
                # configured path does not exist
                if not os.path.exists(model_path):
                    model_name = os.path.basename(model_path)
                    local_model_path = os.path.join("models", model_name)
                    if os.path.exists(local_model_path):
                        model_path = local_model_path
                    else:
                        raise FileNotFoundError(f"GGUF模型文件不存在: {model_path} 或 {local_model_path}")
                
                return LlamaCpp(
                    model_path=model_path,
                    n_ctx=self.config.gguf_n_ctx,
                    n_threads=self.config.gguf_n_threads or None,  # 0 -> None -> auto-select
                    n_gpu_layers=self.config.gguf_n_gpu_layers,
                    temperature=self.config.temperature,
                    max_tokens=self.config.max_tokens,
                    verbose=False
                )
            else:
                # Local Hugging Face model
                logger.info(f"正在加载本地模型: {self.config.local_model_name}")
                
                # Optional 4-bit quantization via bitsandbytes
                bnb_config = None
                if self.config.load_in_4bit:
                    bnb_config = BitsAndBytesConfig(
                        load_in_4bit=True,
                        bnb_4bit_quant_type=self.config.bnb_4bit_quant_type,
                        # config stores the dtype by name, e.g. "float16"
                        bnb_4bit_compute_dtype=getattr(torch, self.config.bnb_4bit_compute_dtype),
                        bnb_4bit_use_double_quant=self.config.bnb_4bit_use_double_quant,
                    )
                
                model = AutoModelForCausalLM.from_pretrained(
                    self.config.local_model_name,
                    quantization_config=bnb_config,
                    device_map="auto",
                    trust_remote_code=True
                )
                tokenizer = AutoTokenizer.from_pretrained(
                    self.config.local_model_name,
                    trust_remote_code=True
                )
                
                # Wrap model + tokenizer in a text-generation pipeline
                pipe = pipeline(
                    "text-generation",
                    model=model,
                    tokenizer=tokenizer,
                    max_new_tokens=self.config.max_tokens,
                    temperature=self.config.temperature,
                    device=0 if self.config.device == "cuda" else -1,
                )
                
                return HuggingFacePipeline(pipeline=pipe)
                
        except Exception as e:
            logger.error(f"初始化语言模型失败: {str(e)}")
            raise
    
    def add_documents(self, file_paths: List[str], metadatas: Optional[List[Dict[str, Any]]] = None) -> List[str]:
        """
        Process and store multiple documents in the knowledge base.
        
        Args:
            file_paths: Paths of the documents to ingest.
            metadatas: Per-document metadata, aligned one-to-one with
                ``file_paths``.
        
        Returns:
            IDs of all stored chunks.
        
        Raises:
            ValueError: ``file_paths`` and ``metadatas`` differ in length.
        """
        if metadatas is None:
            # BUGFIX: `[{}] * len(file_paths)` aliases a SINGLE shared dict
            # across every document; build one independent dict per document.
            metadatas = [{} for _ in file_paths]
            
        if len(file_paths) != len(metadatas):
            raise ValueError("file_paths和metadatas的长度必须相同")
            
        all_chunk_ids = []
        
        for file_path, metadata in zip(file_paths, metadatas):
            try:
                # Split the document into chunks
                chunks = self.document_processor.process_document(file_path, metadata)
                
                # One document ID shared by all chunks of this document
                doc_id = str(uuid.uuid4())
                
                for chunk in chunks:
                    chunk.document_id = doc_id
                    # Record the originating file in each chunk's metadata
                    chunk.metadata = chunk.metadata or {}
                    chunk.metadata["source"] = file_path
                    if metadata:
                        chunk.metadata.update(metadata)
                
                # Persist the chunks in the vector store
                chunk_ids = self.vector_store_manager.add_documents(chunks)
                all_chunk_ids.extend(chunk_ids)
                
                logger.info(f"成功处理文档 {file_path}，生成 {len(chunk_ids)} 个块")
                
            except Exception as e:
                logger.error(f"处理文档 {file_path} 失败: {str(e)}")
                raise
                
        return all_chunk_ids
        
    def ingest_document(self, file_path: str, metadata: Optional[Dict[str, Any]] = None) -> List[str]:
        """
        Process and store a single document (backward-compatible API).
        
        Args:
            file_path: Path of the document to ingest.
            metadata: Optional metadata for the document.
            
        Returns:
            IDs of the stored chunks.
        """
        return self.add_documents([file_path], [metadata or {}])
    
    def search(self, query: str, top_k: Optional[int] = None, filter: Optional[Dict[str, Any]] = None) -> List[Dict[str, Any]]:
        """
        Run a similarity search against the vector store.
        
        Args:
            query: Query text.
            top_k: Number of results to return; defaults to the configured
                ``top_k``.
            filter: Optional metadata filter passed to the vector store.
            
        Returns:
            A list of dicts with ``content``, ``score`` and ``metadata`` keys.
        """
        top_k = top_k or self.config.top_k
        try:
            results = self.vector_store_manager.search_documents(
                query=query,
                top_k=top_k,
                filter=filter
            )
            
            # Normalize results to a consistent shape with safe defaults
            return [
                {
                    "content": result.get("content", ""),
                    "score": result.get("score", 0.0),
                    "metadata": result.get("metadata", {})
                }
                for result in results
            ]
            
        except Exception as e:
            logger.error(f"执行搜索失败: {str(e)}")
            raise
    
    def generate_answer(self, query: str, context: Optional[List[Dict[str, Any]]] = None, **kwargs) -> Dict[str, Any]:
        """
        Generate an answer for a question, optionally over given context.
        
        Args:
            query: The question to answer.
            context: Retrieved context documents; when ``None`` or empty a
                similarity search is run first.
            **kwargs: Unused; accepted for forward compatibility.
            
        Returns:
            Dict with ``answer`` and ``sources``; on failure, ``answer`` is
            a fallback message and an ``error`` key is added.
        """
        try:
            # Retrieve context ourselves when the caller supplied none
            if not context:
                context = self.search(query)
            
            # Prompt instructing the model to answer only from the context
            prompt_template = """根据以下上下文信息回答问题。如果你不知道答案，请说"根据提供的信息，我无法回答这个问题"。
            
            上下文:
            {context}
            
            问题: {question}
            回答:"""
            
            PROMPT = PromptTemplate(
                template=prompt_template, 
                input_variables=["context", "question"]
            )
            
            # "stuff" chain: all context documents are concatenated into
            # a single prompt
            chain = load_qa_chain(
                llm=self.llm, 
                chain_type="stuff",
                prompt=PROMPT
            )
            
            # Convert our plain-dict results into LangChain Documents
            from langchain.schema import Document as LangchainDocument
            docs = [
                LangchainDocument(
                    page_content=doc["content"],
                    metadata=doc.get("metadata", {})
                ) 
                for doc in context
            ]
            
            result = chain(
                {"input_documents": docs, "question": query},
                return_only_outputs=True
            )
            
            return {
                "answer": result["output_text"].strip(),
                "sources": [doc.get("metadata", {}) for doc in context]
            }
            
        except Exception as e:
            logger.error(f"生成回答失败: {str(e)}")
            # Best-effort API: return a fallback payload instead of raising
            return {
                "answer": "抱歉，生成回答时出现错误。",
                "error": str(e)
            }
    
    def rag_chain(self, query: str, filter: Optional[Dict[str, Any]] = None, top_k: Optional[int] = None) -> Dict[str, Any]:
        """
        Full RAG pipeline: retrieve relevant documents, then generate.
        
        Args:
            query: The question to answer.
            filter: Optional metadata filter for retrieval.
            top_k: Number of documents to retrieve.
            
        Returns:
            Dict with ``answer``, ``sources``, ``context`` and ``status``
            (plus ``error`` when something went wrong).
        """
        try:
            # 1. Retrieve relevant documents
            context = self.search(query, top_k=top_k, filter=filter)
            
            # 2. Generate the answer from the retrieved context
            result = self.generate_answer(query, context)
            
            # 3. Assemble the response payload
            response = {
                "answer": result.get("answer", "抱歉，无法生成回答。"),
                "sources": result.get("sources", []),
                "context": context,
                "status": "success"
            }
            
            # Propagate any error reported by the generation step
            if "error" in result:
                response["status"] = "error"
                response["error"] = result["error"]
                
            return response
            
        except Exception as e:
            error_msg = f"RAG流程执行失败: {str(e)}"
            logger.error(error_msg)
            return {
                "answer": "抱歉，处理您的请求时出现错误。",
                "sources": [],
                "context": [],
                "status": "error",
                "error": error_msg
            }
