{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "不依赖 LangChain 的 RAG 实现反而更加灵活和可控。"
   ]
  },
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 自主实现 RAG 的核心功能\n",
    "1. 文档处理与向量化\n",
    "2. 语义检索\n",
    "3. 查询增强\n",
    "4. 答案生成\n",
    "5. 上下文管理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "from transformers import AutoTokenizer, AutoModel, AutoModelForSeq2SeqLM\n",
    "from sentence_transformers import SentenceTransformer\n",
    "import faiss\n",
    "import json\n",
    "from typing import List, Dict, Any\n",
    "import re\n",
    "\n",
    "\n",
    "class SimpleRAG:\n",
    "    \"\"\"Minimal retrieval-augmented generation (RAG) pipeline.\n",
    "\n",
    "    Uses sentence-transformers for embeddings, a FAISS inner-product\n",
    "    index for vector search, and a HuggingFace seq2seq model for\n",
    "    answer generation.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, embedding_model_name=\"sentence-transformers/all-MiniLM-L6-v2\",\n",
    "                 llm_model_name=\"google/flan-t5-base\"):\n",
    "        # Initialize the embedding model\n",
    "        self.embedding_model = SentenceTransformer(embedding_model_name)\n",
    "        self.embedding_dim = self.embedding_model.get_sentence_embedding_dimension()\n",
    "\n",
    "        # Initialize the generation (seq2seq) model\n",
    "        self.llm_tokenizer = AutoTokenizer.from_pretrained(llm_model_name)\n",
    "        self.llm_model = AutoModelForSeq2SeqLM.from_pretrained(llm_model_name)\n",
    "\n",
    "        # Initialize the vector store; inner product equals cosine\n",
    "        # similarity because vectors are L2-normalised before add/search.\n",
    "        self.index = faiss.IndexFlatIP(self.embedding_dim)  # inner-product similarity\n",
    "        self.documents = []  # chunk texts; row i matches index vector i\n",
    "        self.metadata = []  # per-chunk metadata, parallel to documents\n",
    "\n",
    "    def preprocess_text(self, text: str) -> str:\n",
    "        \"\"\"文本预处理\"\"\"\n",
    "        # 移除多余空白\n",
    "        text = re.sub(r'\\s+', ' ', text)\n",
    "        # 基本清理\n",
    "        text = text.strip()\n",
    "        return text\n",
    "\n",
    "    def chunk_documents(self, text: str, chunk_size: int = 512, overlap: int = 50) -> List[str]:\n",
    "        \"\"\"文档分块\"\"\"\n",
    "        words = text.split()\n",
    "        chunks = []\n",
    "\n",
    "        for i in range(0, len(words), chunk_size - overlap):\n",
    "            chunk = ' '.join(words[i:i + chunk_size])\n",
    "            chunks.append(chunk)\n",
    "\n",
    "        return chunks\n",
    "\n",
    "    def add_documents(self, documents: List[Dict[str, Any]]):\n",
    "        \"\"\"Chunk, embed and index a batch of documents.\n",
    "\n",
    "        Args:\n",
    "            documents: Dicts with a required 'content' key; 'source' and\n",
    "                'title' are optional metadata fields.\n",
    "        \"\"\"\n",
    "        all_chunks = []\n",
    "        all_metadata = []\n",
    "        # chunk_index must be an index into self.documents, so offset by the\n",
    "        # chunks already stored (the original reset it on every batch)\n",
    "        base_index = len(self.documents)\n",
    "\n",
    "        for doc in documents:\n",
    "            content = self.preprocess_text(doc['content'])\n",
    "            chunks = self.chunk_documents(content)\n",
    "\n",
    "            for chunk in chunks:\n",
    "                all_chunks.append(chunk)\n",
    "                all_metadata.append({\n",
    "                    'source': doc.get('source', 'unknown'),\n",
    "                    'title': doc.get('title', ''),\n",
    "                    'chunk_index': base_index + len(all_chunks) - 1\n",
    "                })\n",
    "\n",
    "        if not all_chunks:\n",
    "            # Nothing to index; encoding/normalising an empty batch can fail.\n",
    "            print(\"添加了 0 个文档块\")\n",
    "            return\n",
    "\n",
    "        # Encode all chunks into dense vectors\n",
    "        embeddings = self.embedding_model.encode(\n",
    "            all_chunks, show_progress_bar=True)\n",
    "\n",
    "        # L2-normalise so the inner-product index yields cosine similarity\n",
    "        faiss.normalize_L2(embeddings)\n",
    "        self.index.add(embeddings.astype('float32'))\n",
    "\n",
    "        self.documents.extend(all_chunks)\n",
    "        self.metadata.extend(all_metadata)\n",
    "\n",
    "        print(f\"添加了 {len(all_chunks)} 个文档块\")\n",
    "\n",
    "    def retrieve(self, query: str, top_k: int = 5) -> List[Dict[str, Any]]:\n",
    "        \"\"\"Semantic search over the indexed chunks.\n",
    "\n",
    "        Args:\n",
    "            query: Natural-language query string.\n",
    "            top_k: Maximum number of hits to return.\n",
    "\n",
    "        Returns:\n",
    "            Hits ordered by similarity, each with 'content', 'metadata'\n",
    "            and a cosine-similarity 'score'.\n",
    "        \"\"\"\n",
    "        # Embed and L2-normalise the query (cosine via inner product)\n",
    "        query_embedding = self.embedding_model.encode([query])\n",
    "        faiss.normalize_L2(query_embedding)\n",
    "\n",
    "        # Vector search\n",
    "        distances, indices = self.index.search(\n",
    "            query_embedding.astype('float32'), top_k)\n",
    "\n",
    "        # Assemble results\n",
    "        results = []\n",
    "        for i, idx in enumerate(indices[0]):\n",
    "            # FAISS pads with -1 when fewer than top_k vectors are indexed;\n",
    "            # the original `idx < len(...)` check mapped -1 to the LAST chunk.\n",
    "            if 0 <= idx < len(self.documents):\n",
    "                results.append({\n",
    "                    'content': self.documents[idx],\n",
    "                    'metadata': self.metadata[idx],\n",
    "                    'score': float(distances[0][i])\n",
    "                })\n",
    "\n",
    "        return results\n",
    "\n",
    "    def query_expansion(self, query: str) -> List[str]:\n",
    "        \"\"\"Query expansion - generate several related search queries.\n",
    "\n",
    "        Args:\n",
    "            query: The original user question.\n",
    "\n",
    "        Returns:\n",
    "            Up to 4 unique queries; the original query is always first.\n",
    "        \"\"\"\n",
    "        prompt = f\"\"\"\n",
    "        基于以下问题，生成3个相关的搜索查询，从不同角度探索这个问题。\n",
    "        原始问题: {query}\n",
    "        生成的查询:\n",
    "        1. {query}\n",
    "        2. \"\"\"\n",
    "\n",
    "        inputs = self.llm_tokenizer(\n",
    "            prompt, return_tensors=\"pt\", max_length=512, truncation=True)\n",
    "        # Pass the attention mask explicitly so generate() does not warn\n",
    "        # about (or mis-handle) padding positions.\n",
    "        outputs = self.llm_model.generate(\n",
    "            inputs.input_ids,\n",
    "            attention_mask=inputs.attention_mask,\n",
    "            max_length=100,\n",
    "            num_return_sequences=1,\n",
    "            do_sample=True,\n",
    "            temperature=0.7\n",
    "        )\n",
    "\n",
    "        expanded_queries = [query]  # always include the original query\n",
    "        generated_text = self.llm_tokenizer.decode(\n",
    "            outputs[0], skip_special_tokens=True)\n",
    "\n",
    "        # Parse numbered lines like \"2. some query\" out of the generation\n",
    "        lines = generated_text.split('\\n')\n",
    "        for line in lines:\n",
    "            if re.match(r'^\\d+\\.\\s', line):\n",
    "                expanded_query = re.sub(r'^\\d+\\.\\s', '', line).strip()\n",
    "                # Drop duplicates (the original only compared against the\n",
    "                # seed query, so repeated generations slipped through)\n",
    "                if expanded_query and expanded_query not in expanded_queries:\n",
    "                    expanded_queries.append(expanded_query)\n",
    "\n",
    "        return expanded_queries[:4]  # at most 4 queries\n",
    "\n",
    "    def reciprocal_rank_fusion(self, all_results: List[List[Dict]], k: int = 60) -> List[Dict]:\n",
    "        \"\"\"结果融合算法\"\"\"\n",
    "        fused_scores = {}\n",
    "\n",
    "        for results in all_results:\n",
    "            for rank, result in enumerate(results):\n",
    "                content = result['content']\n",
    "                if content not in fused_scores:\n",
    "                    fused_scores[content] = 0\n",
    "                fused_scores[content] += 1 / (rank + k + 1)\n",
    "\n",
    "        # 转换为列表并排序\n",
    "        fused_results = [\n",
    "            {\n",
    "                'content': content,\n",
    "                'score': score,\n",
    "                'metadata': next((r['metadata'] for r in results_list for results_list in all_results\n",
    "                                  if r['content'] == content), {})\n",
    "            }\n",
    "            for content, score in fused_scores.items()\n",
    "        ]\n",
    "\n",
    "        fused_results.sort(key=lambda x: x['score'], reverse=True)\n",
    "        return fused_results\n",
    "\n",
    "    def format_context(self, retrieved_docs: List[Dict]) -> str:\n",
    "        \"\"\"格式化上下文\"\"\"\n",
    "        context = \"相关文档信息:\\n\\n\"\n",
    "        for i, doc in enumerate(retrieved_docs):\n",
    "            context += f\"[文档 {i+1} - 相关性: {doc['score']:.3f}]\\n\"\n",
    "            context += f\"{doc['content']}\\n\\n\"\n",
    "        return context\n",
    "\n",
    "    def generate_answer(self, query: str, context: str) -> str:\n",
    "        \"\"\"Generate the final answer from the question and its context.\n",
    "\n",
    "        Args:\n",
    "            query: The user question.\n",
    "            context: Context text produced by format_context().\n",
    "\n",
    "        Returns:\n",
    "            The decoded answer string.\n",
    "        \"\"\"\n",
    "        prompt = f\"\"\"基于以下上下文信息，回答问题。如果上下文没有提供足够信息，请如实说明。\n",
    "\n",
    "上下文:\n",
    "{context}\n",
    "\n",
    "问题: {query}\n",
    "\n",
    "答案:\"\"\"\n",
    "\n",
    "        inputs = self.llm_tokenizer(\n",
    "            prompt, return_tensors=\"pt\", max_length=1024, truncation=True)\n",
    "\n",
    "        # Deterministic beam search. The original also passed\n",
    "        # temperature=0.3, which transformers ignores (with a warning)\n",
    "        # when do_sample=False; the attention mask is now passed too.\n",
    "        outputs = self.llm_model.generate(\n",
    "            inputs.input_ids,\n",
    "            attention_mask=inputs.attention_mask,\n",
    "            max_length=256,\n",
    "            num_beams=5,\n",
    "            early_stopping=True\n",
    "        )\n",
    "\n",
    "        answer = self.llm_tokenizer.decode(\n",
    "            outputs[0], skip_special_tokens=True)\n",
    "        return answer\n",
    "\n",
    "    def rag_fusion_retrieve(self, query: str, top_k: int = 5) -> List[Dict]:\n",
    "        \"\"\"RAG-Fusion retrieval: expand the query, search each variant, fuse.\"\"\"\n",
    "        # Step 1: derive several query variants from the original question\n",
    "        expanded_queries = self.query_expansion(query)\n",
    "        print(f\"生成的查询: {expanded_queries}\")\n",
    "\n",
    "        # Step 2: retrieve per variant, over-fetching so the fusion stage\n",
    "        # has more candidates to work with\n",
    "        all_results = [self.retrieve(q, top_k=top_k * 2)\n",
    "                       for q in expanded_queries]\n",
    "\n",
    "        # Step 3: merge the ranked lists and keep the best top_k\n",
    "        return self.reciprocal_rank_fusion(all_results)[:top_k]\n",
    "\n",
    "    def ask(self, question: str, use_rag_fusion: bool = True) -> Dict[str, Any]:\n",
    "        \"\"\"End-to-end Q&A: retrieve relevant chunks, then generate an answer.\"\"\"\n",
    "        # Pick the retrieval strategy\n",
    "        retriever = self.rag_fusion_retrieve if use_rag_fusion else self.retrieve\n",
    "        retrieved_docs = retriever(question, top_k=5)\n",
    "\n",
    "        # Build the prompt context and generate the answer\n",
    "        context = self.format_context(retrieved_docs)\n",
    "        answer = self.generate_answer(question, context)\n",
    "\n",
    "        # Trim the preview so the returned payload stays compact\n",
    "        preview = context[:500] + \"...\" if len(context) > 500 else context\n",
    "\n",
    "        return {\n",
    "            'question': question,\n",
    "            'answer': answer,\n",
    "            'retrieved_documents': retrieved_docs,\n",
    "            'context_preview': preview\n",
    "        }\n",
    "\n",
    "# Example usage\n",
    "\n",
    "\n",
    "def main():\n",
    "    \"\"\"Demo: build a tiny knowledge base and run one question through RAG.\"\"\"\n",
    "    rag = SimpleRAG()\n",
    "\n",
    "    # Sample documents for the knowledge base\n",
    "    documents = [\n",
    "        {\n",
    "            'title': '人工智能介绍',\n",
    "            'content': '''\n",
    "            人工智能是计算机科学的一个分支，旨在创建能够执行通常需要人类智能的任务的系统。\n",
    "            机器学习是人工智能的一个子领域，它使计算机能够在没有明确编程的情况下学习。\n",
    "            深度学习是机器学习的一种，使用神经网络模拟人脑的工作方式。\n",
    "            ''',\n",
    "            'source': '科技百科'\n",
    "        },\n",
    "        {\n",
    "            'title': '机器学习应用',\n",
    "            'content': '''\n",
    "            机器学习已广泛应用于各个领域，包括自然语言处理、计算机视觉、推荐系统等。\n",
    "            在自然语言处理中，Transformer架构已成为最先进的模型，如BERT和GPT。\n",
    "            这些模型在文本生成、翻译、摘要等任务上表现出色。\n",
    "            ''',\n",
    "            'source': '技术期刊'\n",
    "        }\n",
    "    ]\n",
    "\n",
    "    # Index the documents\n",
    "    rag.add_documents(documents)\n",
    "\n",
    "    # Ask one question through the full pipeline\n",
    "    question = \"什么是深度学习？\"\n",
    "    response = rag.ask(question, use_rag_fusion=True)\n",
    "\n",
    "    print(f\"问题: {response['question']}\")\n",
    "    print(f\"答案: {response['answer']}\")\n",
    "    print(\"\\n检索到的相关文档:\")\n",
    "    for position, doc in enumerate(response['retrieved_documents'], start=1):\n",
    "        print(f\"{position}. [{doc['score']:.3f}] {doc['content'][:100]}...\")\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.13.5 (tags/v3.13.5:6cb20a2, Jun 11 2025, 16:15:46) [MSC v.1943 64 bit (AMD64)]"
  },
  "orig_nbformat": 4,
  "vscode": {
   "interpreter": {
    "hash": "340a546b2c4c9cf5d23eb4a2a4e78e923e0b7afe4d00162258c4442b3ee3b061"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
