import numpy as np
import torch
from transformers import AutoTokenizer, AutoModel, AutoModelForSeq2SeqLM
from sentence_transformers import SentenceTransformer
import faiss
import json
from typing import List, Dict, Any
import re


class SimpleRAG:
    """Minimal retrieval-augmented generation (RAG) pipeline.

    Combines a sentence-transformer embedding model, a FAISS inner-product
    index (over L2-normalized vectors, i.e. cosine similarity), and a
    seq2seq language model used both for query expansion and for final
    answer generation.
    """

    def __init__(self, embedding_model_name="sentence-transformers/all-MiniLM-L6-v2",
                 llm_model_name="google/flan-t5-base"):
        # Embedding model shared by documents and queries.
        self.embedding_model = SentenceTransformer(embedding_model_name)
        self.embedding_dim = self.embedding_model.get_sentence_embedding_dimension()

        # Generative model for query expansion / answer synthesis.
        self.llm_tokenizer = AutoTokenizer.from_pretrained(llm_model_name)
        self.llm_model = AutoModelForSeq2SeqLM.from_pretrained(llm_model_name)

        # Vector store: inner product over normalized vectors == cosine.
        self.index = faiss.IndexFlatIP(self.embedding_dim)
        self.documents: List[str] = []            # chunk texts, aligned with index rows
        self.metadata: List[Dict[str, Any]] = []  # per-chunk metadata, same order

    def preprocess_text(self, text: str) -> str:
        """Collapse every whitespace run to a single space and trim the ends."""
        return re.sub(r'\s+', ' ', text).strip()

    def chunk_documents(self, text: str, chunk_size: int = 512, overlap: int = 50) -> List[str]:
        """Split *text* into word-based chunks of ``chunk_size`` words,
        consecutive chunks overlapping by ``overlap`` words.

        Raises:
            ValueError: if ``overlap >= chunk_size`` — the window could never
                advance (the original code silently returned ``[]`` or hit
                range()'s opaque step error instead).
        """
        if overlap >= chunk_size:
            raise ValueError("overlap must be smaller than chunk_size")
        words = text.split()
        step = chunk_size - overlap
        return [' '.join(words[i:i + chunk_size]) for i in range(0, len(words), step)]

    def add_documents(self, documents: List[Dict[str, Any]]):
        """Chunk, embed, and index a batch of documents.

        Each document is a dict with a required ``content`` key and optional
        ``source`` / ``title`` keys.
        """
        all_chunks: List[str] = []
        all_metadata: List[Dict[str, Any]] = []

        for doc in documents:
            content = self.preprocess_text(doc['content'])
            for chunk in self.chunk_documents(content):
                all_chunks.append(chunk)
                all_metadata.append({
                    'source': doc.get('source', 'unknown'),
                    'title': doc.get('title', ''),
                    # NOTE: index within THIS batch (as in the original code);
                    # it does not account for chunks added by earlier calls.
                    'chunk_index': len(all_chunks) - 1
                })

        if not all_chunks:
            return  # nothing to embed; encode([]) / index.add would fail downstream

        embeddings = self.embedding_model.encode(
            all_chunks, show_progress_bar=True)

        # faiss.normalize_L2 mutates in place and expects contiguous float32,
        # so convert BEFORE normalizing (the original normalized first and
        # cast afterwards, which breaks if encode() returns float64).
        embeddings = np.ascontiguousarray(embeddings, dtype='float32')
        faiss.normalize_L2(embeddings)
        self.index.add(embeddings)

        self.documents.extend(all_chunks)
        self.metadata.extend(all_metadata)

        print(f"添加了 {len(all_chunks)} 个文档块")

    def retrieve(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:
        """Return up to ``top_k`` chunks most similar to *query*.

        Each result dict carries ``content``, ``metadata`` and a cosine
        similarity ``score``.
        """
        query_embedding = self.embedding_model.encode([query])
        query_embedding = np.ascontiguousarray(query_embedding, dtype='float32')
        faiss.normalize_L2(query_embedding)

        distances, indices = self.index.search(query_embedding, top_k)

        results = []
        for score, idx in zip(distances[0], indices[0]):
            # FAISS pads with -1 when the index holds fewer than top_k
            # vectors; the original `idx < len(...)` test let -1 wrap
            # around to the last document.
            if 0 <= idx < len(self.documents):
                results.append({
                    'content': self.documents[idx],
                    'metadata': self.metadata[idx],
                    'score': float(score)
                })

        return results

    def query_expansion(self, query: str) -> List[str]:
        """Use the LLM to generate up to three alternative phrasings of
        *query*; the original query is always the first element returned."""
        prompt = f"""
        基于以下问题，生成3个相关的搜索查询，从不同角度探索这个问题。
        原始问题: {query}
        生成的查询:
        1. {query}
        2. """

        inputs = self.llm_tokenizer(
            prompt, return_tensors="pt", max_length=512, truncation=True)
        outputs = self.llm_model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,  # avoid the pad-token warning
            max_length=100,
            num_return_sequences=1,
            do_sample=True,
            temperature=0.7
        )

        expanded_queries = [query]  # always keep the original query
        generated_text = self.llm_tokenizer.decode(
            outputs[0], skip_special_tokens=True)

        # Pull out lines shaped like "2. some query".
        for line in generated_text.split('\n'):
            if re.match(r'^\d+\.\s', line):
                candidate = re.sub(r'^\d+\.\s', '', line).strip()
                if candidate and candidate != query:
                    expanded_queries.append(candidate)

        return expanded_queries[:4]  # cap at 4 queries total

    def reciprocal_rank_fusion(self, all_results: List[List[Dict]], k: int = 60) -> List[Dict]:
        """Fuse several ranked result lists with reciprocal rank fusion.

        A chunk's fused score is the sum over lists of ``1 / (rank + k + 1)``.
        The original implementation looked metadata up with a comprehension
        whose ``for`` clauses were in the wrong order (``results_list`` was
        referenced before being bound), raising NameError at runtime.
        """
        fused_scores: Dict[str, float] = {}
        metadata_by_content: Dict[str, Dict] = {}

        for results in all_results:
            for rank, result in enumerate(results):
                content = result['content']
                fused_scores[content] = fused_scores.get(content, 0.0) + 1.0 / (rank + k + 1)
                # Remember the first metadata seen for each chunk.
                metadata_by_content.setdefault(content, result.get('metadata', {}))

        fused_results = [
            {
                'content': content,
                'score': score,
                'metadata': metadata_by_content.get(content, {})
            }
            for content, score in fused_scores.items()
        ]

        fused_results.sort(key=lambda x: x['score'], reverse=True)
        return fused_results

    def format_context(self, retrieved_docs: List[Dict]) -> str:
        """Render retrieved chunks into a single prompt-ready context string."""
        context = "相关文档信息:\n\n"
        for i, doc in enumerate(retrieved_docs):
            context += f"[文档 {i+1} - 相关性: {doc['score']:.3f}]\n"
            context += f"{doc['content']}\n\n"
        return context

    def generate_answer(self, query: str, context: str) -> str:
        """Generate the final answer with beam search over context + question."""
        prompt = f"""基于以下上下文信息，回答问题。如果上下文没有提供足够信息，请如实说明。

上下文:
{context}

问题: {query}

答案:"""

        inputs = self.llm_tokenizer(
            prompt, return_tensors="pt", max_length=1024, truncation=True)

        # `temperature` was dropped: it only applies with do_sample=True,
        # and recent transformers warns when it is passed to pure beam search.
        outputs = self.llm_model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_length=256,
            num_beams=5,
            early_stopping=True
        )

        return self.llm_tokenizer.decode(outputs[0], skip_special_tokens=True)

    def rag_fusion_retrieve(self, query: str, top_k: int = 5) -> List[Dict]:
        """RAG-Fusion retrieval: expand the query, retrieve per expansion,
        then fuse the ranked lists with reciprocal rank fusion."""
        # 1. Query expansion
        expanded_queries = self.query_expansion(query)
        print(f"生成的查询: {expanded_queries}")

        # 2. Retrieve extra candidates per query so fusion has material to rank.
        all_results = [self.retrieve(q, top_k=top_k * 2) for q in expanded_queries]

        # 3. Fuse and truncate.
        return self.reciprocal_rank_fusion(all_results)[:top_k]

    def ask(self, question: str, use_rag_fusion: bool = True) -> Dict[str, Any]:
        """End-to-end QA: retrieve, build context, generate an answer."""
        if use_rag_fusion:
            retrieved_docs = self.rag_fusion_retrieve(question, top_k=5)
        else:
            retrieved_docs = self.retrieve(question, top_k=5)

        context = self.format_context(retrieved_docs)
        answer = self.generate_answer(question, context)

        return {
            'question': question,
            'answer': answer,
            'retrieved_documents': retrieved_docs,
            'context_preview': context[:500] + "..." if len(context) > 500 else context
        }

# Example usage


def main():
    """Demo: build a tiny two-document knowledge base and run one question
    through the full RAG-Fusion pipeline, printing the answer and sources."""
    rag = SimpleRAG()

    # Two small sample documents for the knowledge base.
    sample_docs = [
        {
            'title': '人工智能介绍',
            'content': '''
            人工智能是计算机科学的一个分支，旨在创建能够执行通常需要人类智能的任务的系统。
            机器学习是人工智能的一个子领域，它使计算机能够在没有明确编程的情况下学习。
            深度学习是机器学习的一种，使用神经网络模拟人脑的工作方式。
            ''',
            'source': '科技百科'
        },
        {
            'title': '机器学习应用',
            'content': '''
            机器学习已广泛应用于各个领域，包括自然语言处理、计算机视觉、推荐系统等。
            在自然语言处理中，Transformer架构已成为最先进的模型，如BERT和GPT。
            这些模型在文本生成、翻译、摘要等任务上表现出色。
            ''',
            'source': '技术期刊'
        }
    ]

    rag.add_documents(sample_docs)

    # Ask a question using the fusion retrieval path.
    question = "什么是深度学习？"
    result = rag.ask(question, use_rag_fusion=True)

    print(f"问题: {result['question']}")
    print(f"答案: {result['answer']}")
    print("\n检索到的相关文档:")
    for rank, doc in enumerate(result['retrieved_documents'], start=1):
        print(f"{rank}. [{doc['score']:.3f}] {doc['content'][:100]}...")


# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
