from langchain_core.prompts import PromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_openai import ChatOpenAI
from langchain_deepseek import ChatDeepSeek
from langchain_community.llms import HuggingFacePipeline
from langchain_community.llms import Ollama
from langchain_core.documents import Document
from typing import List, Optional, Dict, Any
import os
from dotenv import load_dotenv

# Load environment variables from a .env file (API keys, etc.)
load_dotenv()

class ResponseGenerator:
    """Generate grounded answers from retrieved documents.

    Builds a prompt -> LLM -> string-parser chain over one of several
    backends (OpenAI, DeepSeek, local HuggingFace, Ollama) and wraps the
    answer with formatted context and deduplicated source citations.
    """

    def __init__(
        self,
        llm_type: str = "ollama",
        model_name: str = "qwen3:4b",
        temperature: float = 0.7,
        max_tokens: int = 1024,
        **kwargs
    ):
        """
        Initialize the response generator.

        :param llm_type: LLM backend ("openai" / "deepseek" / "huggingface" / "ollama")
        :param model_name: model identifier understood by the chosen backend
        :param temperature: sampling temperature
        :param max_tokens: maximum number of generated tokens
        :param kwargs: extra keyword arguments forwarded to the LLM constructor
        """
        self.llm_type = llm_type
        self.model_name = model_name
        self.temperature = temperature
        self.max_tokens = max_tokens
        self.llm = self._initialize_llm(**kwargs)
        self.prompt_template = self._create_prompt_template()
        # LCEL chain: fill the template, run the model, parse output to str.
        self.chain = self.prompt_template | self.llm | StrOutputParser()

    def _initialize_llm(self, **kwargs) -> Any:
        """Build and return the LLM instance for ``self.llm_type``.

        :raises ValueError: unknown backend, or a required API key is missing
        :raises ImportError: HuggingFace backend requested without transformers
        """
        if self.llm_type == "openai":
            # Hosted OpenAI chat model; the key must come from the environment.
            api_key = os.getenv("OPENAI_API_KEY")
            if not api_key:
                raise ValueError("使用OpenAI模型需要设置OPENAI_API_KEY环境变量")
            return ChatOpenAI(
                model_name=self.model_name,
                temperature=self.temperature,
                max_tokens=self.max_tokens,
                openai_api_key=api_key,
                **kwargs
            )
        elif self.llm_type == "deepseek":
            # Hosted DeepSeek chat model.
            api_key = os.getenv("DEEPSEEK_API_KEY")
            if not api_key:
                raise ValueError("使用DeepSeek模型需要设置DEEPSEEK_API_KEY环境变量")
            return ChatDeepSeek(
                model_name=self.model_name,
                temperature=self.temperature,
                max_tokens=self.max_tokens,
                api_key=api_key,
                **kwargs
            )
        elif self.llm_type == "huggingface":
            # Local HuggingFace model wrapped in a text-generation pipeline.
            try:
                from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
            except ImportError:
                raise ImportError("使用HuggingFace模型需要安装transformers库")

            # Load model and tokenizer from the hub (or local cache).
            tokenizer = AutoTokenizer.from_pretrained(self.model_name)
            model = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                device_map="auto",  # let accelerate choose the device(s)
                **kwargs
            )

            # Text-generation pipeline; pad_token_id silences the
            # "no pad token" warning for causal LMs.
            pipe = pipeline(
                "text-generation",
                model=model,
                tokenizer=tokenizer,
                max_new_tokens=self.max_tokens,
                temperature=self.temperature,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id
            )

            return HuggingFacePipeline(pipeline=pipe)
        elif self.llm_type == "ollama":
            # Local Ollama server; num_predict caps generated tokens.
            return Ollama(
                model=self.model_name,
                temperature=self.temperature,
                num_predict=self.max_tokens,
                **kwargs
            )
        else:
            raise ValueError(f"不支持的LLM类型: {self.llm_type}")

    def _create_prompt_template(self) -> PromptTemplate:
        """Create the default grounded-QA prompt template (context + question)."""
        template = """你是一个基于提供的参考文档回答问题的AI助手。

参考文档内容如下:
{context}

基于以上参考文档，回答用户问题。要求:
1. 仅使用参考文档中的信息回答，不要编造内容
2. 如果参考文档中没有相关信息，回答"根据提供的文档，无法回答该问题"
3. 回答应简洁明了，控制在300字以内
4. 在回答末尾使用[来源:文档标题, 页码]格式标注引用来源

用户问题: {question}

回答:"""

        return PromptTemplate(
            input_variables=["context", "question"],
            template=template
        )

    def _format_context(self, documents: List[Document]) -> str:
        """Render retrieved documents into the context block of the prompt.

        :param documents: retrieved documents (may be empty)
        :return: one formatted section per document, or a placeholder string
        """
        if not documents:
            return "无相关参考文档"

        context_parts = []
        for i, doc in enumerate(documents, 1):
            # Pull display metadata; fall back to positional labels.
            source = doc.metadata.get("source", f"文档{i}")
            page = doc.metadata.get("page", "")
            # Page may legitimately be 0 (zero-indexed loaders), so test
            # against the sentinel values instead of truthiness.
            page_info = f"(页码: {page})" if page not in ("", None) else ""
            doc_id = doc.metadata.get("id", str(i))

            # Collapse newlines so each document occupies a compact section.
            content = doc.page_content.strip().replace("\n", " ")
            context_parts.append(
                f"[文档{doc_id}: {source}{page_info}]\n{content}\n"
            )

        return "\n".join(context_parts)

    def _format_citations(self, documents: List[Document]) -> str:
        """Build a deduplicated, deterministic citation trailer.

        :param documents: documents the answer was grounded in
        :return: "[来源: ...]" string, or "" when there are no documents
        """
        if not documents:
            return ""

        citations = set()
        for doc in documents:
            source = doc.metadata.get("source", "未知来源")
            page = doc.metadata.get("page", "")
            # Keep page 0 citations as well (see _format_context).
            if page not in ("", None):
                citations.add(f"{source}, 页码{page}")
            else:
                citations.add(source)

        # sorted() makes the output stable across runs; bare set iteration
        # order is arbitrary and made answers non-deterministic.
        return "[来源: " + ", ".join(sorted(citations)) + "]"

    def generate(self, question: str, documents: List[Document], **kwargs) -> Dict[str, Any]:
        """
        Generate an answer for *question* grounded in *documents*.

        :param question: user question
        :param documents: retrieved documents used as context
        :param kwargs: extra keyword arguments forwarded to ``chain.invoke``
        :return: dict with "answer" and "sources" (plus "error" on failure)
        """
        # Format the retrieval context for the prompt.
        context = self._format_context(documents)

        try:
            answer = self.chain.invoke({
                "context": context,
                "question": question
            }, **kwargs)

            citations = self._format_citations(documents)

            # Append citations unless the model already emitted them.
            if answer and citations and not answer.endswith(citations):
                answer += f"\n{citations}"

            return {
                "answer": answer,
                "sources": [{
                    "source": doc.metadata.get("source", "未知来源"),
                    "page": doc.metadata.get("page", ""),
                    "score": doc.metadata.get("score", 0.0),
                    # Only add an ellipsis when the preview is actually truncated.
                    "content_preview": doc.page_content[:100]
                    + ("..." if len(doc.page_content) > 100 else "")
                } for doc in documents]
            }
        except Exception as e:
            # Top-level boundary: surface the failure in the payload
            # instead of raising, so callers always get a result dict.
            return {
                "answer": f"生成回答时出错: {str(e)}",
                "sources": [],
                "error": str(e)
            }

    def set_prompt_template(self, template: str, input_variables: List[str]) -> None:
        """Replace the prompt template and rebuild the chain around it.

        :param template: new template text
        :param input_variables: variable names referenced by the template
        """
        self.prompt_template = PromptTemplate(
            input_variables=input_variables,
            template=template
        )
        self.chain = self.prompt_template | self.llm | StrOutputParser()

    def update_llm_parameters(self, **kwargs) -> None:
        """Update attributes on the underlying LLM in place.

        Unknown attribute names are silently ignored (best effort).
        """
        for key, value in kwargs.items():
            if hasattr(self.llm, key):
                setattr(self.llm, key, value)

if __name__ == "__main__":
    # Example usage: wire up vector store -> retriever -> generator and
    # run one question through the whole QA pipeline.
    from src.ingestion.vector_store import VectorStoreManager
    from src.retrieval.retrieval import RetrievalManager

    try:
        # Vector store backing the retriever.
        store = VectorStoreManager(
            embedding_model="huggingface",
            vector_store_type="chroma",
            persist_directory="./vector_db_chroma"
        )

        # Hybrid retrieval over the top 3 candidates.
        retriever = RetrievalManager(
            vector_store_manager=store,
            retrieval_strategy="hybrid",
            top_k=3
        )

        # Answer generator backed by a local HuggingFace model.
        # NOTE: the model must be downloaded first; a smaller model
        # can be substituted for quicker experiments.
        generator = ResponseGenerator(
            llm_type="huggingface",
            model_name="mistralai/Mistral-7B-v0.1",  # swap in another model if desired
            temperature=0.5,
            max_tokens=300
        )

        # Alternatively, an OpenAI-backed generator:
        # generator = ResponseGenerator(
        #     llm_type="openai",
        #     model_name="gpt-3.5-turbo",
        #     temperature=0.5
        # )

        # Run the QA flow for a single test question.
        query = "什么是RAG技术?"
        print(f"用户问题: {query}\n")

        docs = retriever.retrieve(query)
        print(f"检索到{len(docs)}个相关文档\n")

        result = generator.generate(query, docs)

        print("AI回答:")
        print(result["answer"])

        print("\n引用来源:")
        for idx, src in enumerate(result["sources"], start=1):
            print(f"{idx}. {src['source']} (相关性分数: {src['score']:.4f})")
            print(f"   内容预览: {src['content_preview']}")

    except Exception as exc:
        print(f"问答流程出错: {str(exc)}")