import os
import re
import sys
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import Any, Dict, List, Optional

import numpy as np
from langchain.chains import LLMChain
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.output_parsers import PydanticOutputParser
from langchain.prompts import PromptTemplate
from langchain.retrievers import BM25Retriever, EnsembleRetriever
from langchain.retrievers import ContextualCompressionRetriever
from langchain.retrievers.document_compressors import LLMChainExtractor
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain_community.llms import DeepSeek
from pydantic import BaseModel, Field
from sentence_transformers import CrossEncoder


class QueryRewriter(ABC):
    """Abstract interface for query-rewriting strategies.

    Concrete subclasses transform a raw user query into one (or more)
    retrieval-friendly variants.
    """

    @abstractmethod
    def rewrite(self, query: str) -> str:
        """Return a rewritten form of *query*."""
        ...


class PromptOptimizer(QueryRewriter):
    """Rewrites a query with the LLM so it retrieves better, intent intact."""

    def __init__(self, llm):
        # LLM that performs the rewrite.
        self.llm = llm
        self.template = """请优化以下查询以获得更好的检索结果。
        保持原始意图不变，但可以添加相关术语和上下文。
        
        原始查询: {query}
        
        优化后的查询:"""

    def rewrite(self, query: str) -> str:
        """Return an LLM-optimized version of *query*."""
        rewrite_chain = LLMChain(
            llm=self.llm,
            prompt=PromptTemplate(template=self.template, input_variables=["query"]),
        )
        return rewrite_chain.run(query=query).strip()


class SubQueryGenerator(QueryRewriter):
    """Decomposes a query into several focused sub-queries via the LLM."""

    def __init__(self, llm):
        # LLM used to generate the sub-queries.
        self.llm = llm
        self.template = """将以下查询分解为多个相关的子查询，以便更全面地检索信息。
        
        原始查询: {query}
        
        生成3个相关子查询:"""

    @staticmethod
    def _parse_queries(text: str) -> List[str]:
        """One query per non-empty line of LLM output.

        Leading list markers ("1.", "2)", "-", "*", "•") are stripped —
        the raw markers would otherwise be fed verbatim into retrieval
        and degrade matching.
        """
        queries = []
        for line in text.split('\n'):
            cleaned = re.sub(r'^\s*(?:\d+[.、)]\s*|[-*•]\s*)', '', line).strip()
            if cleaned:
                queries.append(cleaned)
        return queries

    def rewrite(self, query: str) -> List[str]:
        """Return a list of sub-queries derived from *query*.

        NOTE(review): the return type intentionally differs from the base
        class's ``-> str``; existing callers (process_query) rely on a list.
        """
        prompt = PromptTemplate(template=self.template, input_variables=["query"])
        chain = LLMChain(llm=self.llm, prompt=prompt)
        return self._parse_queries(chain.run(query=query).strip())


class HyDEGenerator:
    """HyDE (Hypothetical Document Embeddings) generator.

    Asks the LLM to draft a plausible answer passage for a question; that
    passage is then used as extra retrieval context.
    """

    def __init__(self, llm):
        # LLM that drafts the hypothetical passage.
        self.llm = llm
        self.template = """基于以下问题，生成一个假设性的文档片段作为可能的答案。
        这个假设答案应该是合理和相关的。
        
        问题: {query}
        
        假设答案:"""

    def generate(self, query: str) -> str:
        """Return a hypothetical answer passage for *query*."""
        hyde_chain = LLMChain(
            llm=self.llm,
            prompt=PromptTemplate(template=self.template, input_variables=["query"]),
        )
        return hyde_chain.run(query=query).strip()


class ReverseHyDEGenerator:
    """Reverse HyDE: generates questions a given passage could answer,
    used to broaden retrieval coverage."""

    def __init__(self, llm):
        # LLM used to generate candidate questions.
        self.llm = llm
        self.template = """基于以下文档片段，生成可能会问到的相关问题。
        这些问题应该能帮助找到更多相关信息。
        
        文档片段: {context}
        
        生成3个相关问题:"""

    @staticmethod
    def _parse_questions(text: str) -> List[str]:
        """One question per non-empty line of LLM output.

        Leading list markers ("1.", "2)", "-", "*", "•") are stripped so they
        do not leak into downstream retrieval queries (the original returned
        the raw numbered lines).
        """
        questions = []
        for line in text.split('\n'):
            cleaned = re.sub(r'^\s*(?:\d+[.、)]\s*|[-*•]\s*)', '', line).strip()
            if cleaned:
                questions.append(cleaned)
        return questions

    def generate(self, context: str) -> List[str]:
        """Return questions answerable from *context*."""
        prompt = PromptTemplate(template=self.template, input_variables=["context"])
        chain = LLMChain(llm=self.llm, prompt=prompt)
        return self._parse_questions(chain.run(context=context).strip())


class ContextualVectorEmbedding:
    """Context-aware query embedding.

    Wraps a base embedding model; when surrounding context is supplied, the
    query is embedded together with that context.
    """

    def __init__(self, base_embeddings: HuggingFaceEmbeddings):
        # Underlying model that performs the actual encoding.
        self.base_embeddings = base_embeddings

    def embed_query(self, query: str, context: Optional[str] = None) -> List[float]:
        """Embed *query*; a non-empty *context* is prepended first."""
        if not context:
            return self.base_embeddings.embed_query(query)
        combined = f"Context: {context}\nQuery: {query}"
        return self.base_embeddings.embed_query(combined)


class Reranker:
    """Cross-encoder reranker for retrieved documents."""

    def __init__(self):
        # Cross-encoder scores (query, passage) pairs jointly — slower than a
        # bi-encoder but more accurate for final ranking.
        self.cross_encoder = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')

    @staticmethod
    def _doc_text(doc) -> str:
        """Text of a document: supports the local ``Document`` dataclass
        (``.content``) and langchain Documents (``.page_content``) — the
        original read ``doc.content`` unconditionally and crashed on
        langchain chunks."""
        text = getattr(doc, "content", None)
        if text is None:
            text = getattr(doc, "page_content", "")
        return text

    def rerank(self, query: str, documents: "List[Document]", top_k: int = 3) -> "List[Document]":
        """Return the *top_k* documents most relevant to *query*.

        Annotations are string-quoted: ``Document`` is defined later in this
        module, so eager evaluation raised NameError at import time.
        """
        if not documents:
            # Guard: CrossEncoder.predict cannot handle an empty pair list.
            return []
        pairs = [[query, self._doc_text(doc)] for doc in documents]
        scores = self.cross_encoder.predict(pairs)

        # Sort by relevance score, best first.
        ranked = sorted(zip(documents, scores), key=lambda pair: pair[1], reverse=True)
        return [doc for doc, _ in ranked[:top_k]]


class PromptCompressor:
    """Uses the LLM itself to shorten prompts while keeping key information."""

    # Template for the simplified LLM-based compression (placeholder until a
    # real LLMLingua integration lands).
    _COMPRESS_TEMPLATE = """请压缩以下提示词，保持关键信息但减少长度：
        
        原始提示词: {prompt}
        
        压缩后的提示词:"""

    def __init__(self, llm):
        # LLM that performs the compression.
        self.llm = llm

    def compress_llmlingua(self, prompt: str) -> str:
        """LLMLingua-style compression (simplified LLM-based stand-in)."""
        compress_chain = LLMChain(
            llm=self.llm,
            prompt=PromptTemplate(
                template=self._COMPRESS_TEMPLATE,
                input_variables=["prompt"],
            ),
        )
        return compress_chain.run(prompt=prompt).strip()

    def compress_long_llmlingua(self, prompt: str) -> str:
        """LongLLMLingua-style compression for long inputs.

        Currently delegates to the basic variant until the real
        LongLLMLingua library is integrated.
        """
        return self.compress_llmlingua(prompt)


class HybridSearch:
    """Combines dense (vector) and sparse (BM25) retrieval, then reranks."""

    def __init__(self, vector_store, bm25_retriever, reranker):
        self.vector_store = vector_store      # dense similarity search backend
        self.bm25_retriever = bm25_retriever  # keyword/BM25 retriever
        self.reranker = reranker              # cross-encoder reranker

    def search(self, query: str, k: int = 5) -> "List[Document]":
        """Return the top-*k* reranked union of vector and BM25 results.

        Fixes vs. the original:
        - ``Document`` annotations are string-quoted (the class is defined
          later in this module; eager evaluation raised NameError on import).
        - Deduplication is keyed on document text instead of ``list(set(...))``
          — langchain Documents are unhashable, and set order is arbitrary.
        """
        vector_results = self.vector_store.similarity_search(query, k=k)
        bm25_results = self.bm25_retriever.get_relevant_documents(query)

        # Merge, deduplicating by text while preserving retrieval order.
        unique = {}
        for doc in vector_results + bm25_results:
            text = getattr(doc, "content", None)
            if text is None:
                text = getattr(doc, "page_content", "")
            if text not in unique:
                unique[text] = doc

        return self.reranker.rerank(query, list(unique.values()), top_k=k)


@dataclass
class Document:
    """A unit of text together with its descriptive metadata."""

    # Raw text of the chunk.
    content: str
    # Arbitrary provenance/metadata for the chunk (source, page, ...).
    metadata: Dict[str, Any]


class AnswerWithSources(BaseModel):
    """Structured LLM output: answer, provenance, and self-assessment.

    The Chinese ``description`` strings below are prompt-facing text consumed
    by PydanticOutputParser's format instructions — do not translate them.
    """
    answer: str = Field(description="对问题的详细回答")
    sources: List[str] = Field(description="用于生成答案的来源文档")
    confidence: float = Field(description="答案的置信度，范围0-1")
    reasoning: str = Field(description="推理过程说明")


class AdvancedRAG:
    """End-to-end Advanced RAG pipeline.

    Combines query rewriting (optimization, sub-queries, HyDE / reverse
    HyDE), hybrid dense+sparse retrieval with cross-encoder reranking,
    prompt compression, structured answer generation and a final
    self-verification pass.
    """

    def __init__(self, model_name: str = "deepseek-ai/deepseek-v3"):
        """Build the LLM, the embedding model and all pipeline components.

        Args:
            model_name: DeepSeek model identifier used for every LLM call.
        """
        self.llm = DeepSeek(model=model_name)
        self.embeddings = HuggingFaceEmbeddings(
            model_name="sentence-transformers/all-MiniLM-L6-v2"
        )
        self.cove_embeddings = ContextualVectorEmbedding(self.embeddings)

        # Query-rewriting / generation components.
        self.prompt_optimizer = PromptOptimizer(self.llm)
        self.sub_query_generator = SubQueryGenerator(self.llm)
        self.hyde_generator = HyDEGenerator(self.llm)
        self.reverse_hyde_generator = ReverseHyDEGenerator(self.llm)
        self.reranker = Reranker()
        self.prompt_compressor = PromptCompressor(self.llm)

        # Storage / retrieval — populated by setup_retrievers().
        self.vector_store = None
        self.hybrid_retriever = None
        self.output_parser = PydanticOutputParser(pydantic_object=AnswerWithSources)

    @staticmethod
    def _doc_text(doc) -> str:
        """Text of a document chunk.

        Chunks produced by PyPDFLoader/RecursiveCharacterTextSplitter are
        langchain Documents exposing ``page_content``; the local ``Document``
        dataclass exposes ``content``. The original code read ``doc.content``
        unconditionally and raised AttributeError on langchain chunks.
        """
        text = getattr(doc, "page_content", None)
        if text is None:
            text = getattr(doc, "content", "")
        return text

    def load_documents(self, file_path: str) -> List[Document]:
        """Load a PDF and split it into overlapping text chunks."""
        print(f"正在加载文件: {file_path}")
        loader = PyPDFLoader(file_path)
        documents = loader.load()

        # 1000-char chunks with 200-char overlap preserve context across
        # split boundaries.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=1000,
            chunk_overlap=200,
            length_function=len,
        )
        chunks = text_splitter.split_documents(documents)
        print(f"文档已分割为 {len(chunks)} 个块")
        return chunks

    def setup_retrievers(self, documents: List[Document]):
        """Build the vector store, BM25 retriever, hybrid search and the
        contextual-compression retriever from *documents*."""
        # Dense vector store over the chunks.
        self.vector_store = Chroma.from_documents(
            documents=documents,
            embedding=self.embeddings
        )
        vector_retriever = self.vector_store.as_retriever(
            search_kwargs={"k": 3}
        )

        # Sparse keyword retriever.
        bm25_retriever = BM25Retriever.from_documents(documents)
        bm25_retriever.k = 3

        # Dense + sparse fusion with cross-encoder reranking.
        self.hybrid_search = HybridSearch(
            self.vector_store,
            bm25_retriever,
            self.reranker
        )

        # Contextual compression over an ensemble of both retrievers
        # (dense weighted 0.7, sparse 0.3).
        compressor = LLMChainExtractor.from_llm(self.llm)
        self.hybrid_retriever = ContextualCompressionRetriever(
            base_compressor=compressor,
            base_retriever=EnsembleRetriever(
                retrievers=[vector_retriever, bm25_retriever],
                weights=[0.7, 0.3]
            )
        )

    def process_query(self, query: str) -> List[str]:
        """Expand *query* into a list of retrieval queries."""
        # 1. LLM-optimized rewrite of the original query.
        optimized_query = self.prompt_optimizer.rewrite(query)

        # 2. Decompose into focused sub-queries.
        sub_queries = self.sub_query_generator.rewrite(query)

        # 3. HyDE: draft a hypothetical answer passage.
        #    (The original also computed a contextual embedding of the query
        #    against this passage but never used it; that dead — and costly —
        #    encoder call was removed.)
        hyde_doc = self.hyde_generator.generate(query)

        # 4. Reverse HyDE: questions the hypothetical passage could answer.
        reverse_queries = self.reverse_hyde_generator.generate(hyde_doc)

        # The optimized query comes first: downstream reranking uses it.
        return [optimized_query] + sub_queries + reverse_queries

    def retrieve_and_process(self, queries: List[str]) -> List[Document]:
        """Run hybrid search for each query, dedupe by text, rerank once."""
        if not queries:
            return []

        all_docs = []
        for query in queries:
            all_docs.extend(self.hybrid_search.search(query))

        # Deduplicate by chunk text; _doc_text handles both langchain
        # Documents and the local dataclass (the original's ``doc.content``
        # crashed on langchain chunks).
        unique_docs = list({self._doc_text(doc): doc for doc in all_docs}.values())

        # Final rerank against the first (optimized original) query.
        return self.reranker.rerank(queries[0], unique_docs)

    def generate_answer(self, query: str, documents: List[Document]) -> AnswerWithSources:
        """Generate a structured, self-verified answer from *documents*."""
        context = "\n\n".join(self._doc_text(doc) for doc in documents)

        # Compress the retrieved context to reduce prompt tokens.
        compressed_context = self.prompt_compressor.compress_llmlingua(context)

        # Ask the LLM for a structured answer.
        prompt = self.generate_prompt()
        chain = LLMChain(llm=self.llm, prompt=prompt)
        response = chain.run(context=compressed_context, question=query)

        # Parse into the AnswerWithSources schema.
        parsed_response = self.output_parser.parse(response)

        # A verification pass may lower confidence / annotate the reasoning.
        return self.self_verify(parsed_response)

    def generate_prompt(self) -> PromptTemplate:
        """Build the QA prompt with structured-output format instructions."""
        template = """基于以下上下文回答问题。如果无法从上下文中找到答案，请明确说明。

上下文:
{context}

问题: {question}

请提供以下格式的回答:
{format_instructions}

回答:"""

        return PromptTemplate(
            template=template,
            input_variables=["context", "question"],
            partial_variables={
                "format_instructions": self.output_parser.get_format_instructions()
            }
        )

    def self_verify(self, answer: AnswerWithSources) -> AnswerWithSources:
        """Ask the LLM to check the answer against its own sources/reasoning.

        Returns a re-parsed AnswerWithSources whose confidence may have been
        lowered if the verifier found inconsistencies.
        """
        verify_prompt = PromptTemplate(
            template="""请验证以下答案的准确性和完整性：

答案: {answer}
来源: {sources}
推理过程: {reasoning}

请评估:
1. 答案是否完全基于提供的来源
2. 推理过程是否合理
3. 是否存在任何矛盾或不准确之处

如果发现问题，请降低置信度并在推理过程中说明原因。

{format_instructions}""",
            input_variables=["answer", "sources", "reasoning"],
            partial_variables={
                "format_instructions": self.output_parser.get_format_instructions()
            }
        )

        verify_chain = LLMChain(llm=self.llm, prompt=verify_prompt)
        verified_result = verify_chain.run(
            answer=answer.answer,
            sources=answer.sources,
            reasoning=answer.reasoning
        )

        return self.output_parser.parse(verified_result)

    def answer_question(self, question: str) -> AnswerWithSources:
        """Full pipeline: expand the question, retrieve, generate, verify."""
        # 1. Query expansion/rewriting.
        processed_queries = self.process_query(question)

        # 2. Hybrid retrieval + dedup + rerank.
        relevant_docs = self.retrieve_and_process(processed_queries)

        # 3. Structured answer generation with self-verification.
        return self.generate_answer(question, relevant_docs)


def main():
    """CLI entry point: index a PDF, then run an interactive Q&A loop.

    Fixes vs. the original: uses ``sys`` directly instead of the
    undocumented ``os.sys`` alias, and validates the command line *before*
    paying for model initialization.
    """
    # Validate arguments first — AdvancedRAG() loads heavyweight models.
    if len(sys.argv) < 2:
        print("使用方法: python main.py <pdf文件路径>")
        sys.exit(1)

    pdf_path = sys.argv[1]
    if not os.path.exists(pdf_path):
        print(f"错误: 文件 {pdf_path} 不存在")
        sys.exit(1)

    # Initialize the RAG system and index the document.
    rag = AdvancedRAG()
    documents = rag.load_documents(pdf_path)
    rag.setup_retrievers(documents)

    # Interactive question loop.
    print("\n系统初始化完成! 您可以开始提问 (输入'退出'结束)")
    while True:
        question = input("\n请输入您的问题: ")
        if question.lower() in ["退出", "exit", "quit"]:
            break

        try:
            result = rag.answer_question(question)
            print("\n回答:", result.answer)
            print("\n参考来源:", "\n".join(result.sources))
            print("\n置信度:", result.confidence)
            print("\n推理过程:", result.reasoning)
        except Exception as e:
            # Keep the REPL alive when a single question fails.
            print(f"处理问题时出错: {str(e)}")


if __name__ == "__main__":
    main()