{
 "cells": [
  {
   "attachments": {},
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## RAG Fusion 核心代码实现"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "from typing import List, Dict\n",
    "\n",
    "import numpy as np\n",
    "import torch\n",
    "from rank_bm25 import BM25Okapi\n",
    "from transformers import AutoTokenizer, AutoModel\n",
    "\n",
    "class RAGFusion:\n",
    "    def __init__(self, retriever, llm_model, num_queries=5):\n",
    "      self.retriever = retriever\n",
    "      self.llm_model = llm_model\n",
    "      self.num_queries = num_queries\n",
    "      self.tokenizer = AutoTokenizer.from_pretrained(\"bert-base-uncased\")\n",
    "      self.rerank_model = AutoModel.from_pretrained(\"bert-base-uncased\")\n",
    "    \n",
    "    def generate_queries(self, original_query: str) -> List[str]:\n",
    "      \"\"\"生成多个相关查询\"\"\"\n",
    "      prompt = f\"\"\"\n",
    "      基于以下问题，生成{self.num_queries}个相关的搜索查询。\n",
    "      确保查询从不同角度探讨原始问题。\n",
    "      \n",
    "      原始问题: {original_query}\n",
    "      \n",
    "      生成的查询:\n",
    "      1. \"\"\"\n",
    "      \n",
    "      response = self.llm_model.generate(prompt)\n",
    "      queries = [original_query]  # 总是包含原始查询\n",
    "      # 解析生成的查询并添加到列表\n",
    "      # ... 解析逻辑\n",
    "      return queries\n",
    "    \n",
    "    def reciprocal_rank_fusion(self, all_results: List[List[Dict]], k=60) -> List[Dict]:\n",
    "      \"\"\"Reciprocal Rank Fusion 算法\"\"\"\n",
    "      fused_scores = {}\n",
    "      \n",
    "      for results in all_results:\n",
    "        for rank, doc in enumerate(results):\n",
    "          doc_id = doc['id']\n",
    "          if doc_id not in fused_scores:\n",
    "            fused_scores[doc_id] = 0\n",
    "          fused_scores[doc_id] += 1 / (rank + k + 1)\n",
    "      \n",
    "      # 按分数排序\n",
    "      sorted_docs = sorted(\n",
    "        [{'id': doc_id, 'score': score} for doc_id, score in fused_scores.items()],\n",
    "        key=lambda x: x['score'],\n",
    "        reverse=True\n",
    "      )\n",
    "      return sorted_docs\n",
    "    \n",
    "    def rerank_documents(self, query: str, documents: List[Dict]) -> List[Dict]:\n",
    "      \"\"\"基于语义相似度重排序\"\"\"\n",
    "      # 使用交叉编码器或BERT进行精细重排\n",
    "      query_embedding = self._get_embedding(query)\n",
    "      doc_embeddings = [self._get_embedding(doc['content']) for doc in documents]\n",
    "      \n",
    "      similarities = [\n",
    "        self._cosine_similarity(query_embedding, doc_embed)\n",
    "        for doc_embed in doc_embeddings\n",
    "      ]\n",
    "      \n",
    "      # 按相似度重新排序文档\n",
    "      reranked_docs = [\n",
    "        {**doc, 'similarity': sim}\n",
    "        for doc, sim in zip(documents, similarities)\n",
    "      ]\n",
    "      reranked_docs.sort(key=lambda x: x['similarity'], reverse=True)\n",
    "      \n",
    "      return reranked_docs\n",
    "    \n",
    "    def retrieve(self, query: str) -> List[Dict]:\n",
    "      \"\"\"完整的 RAG Fusion 检索流程\"\"\"\n",
    "      # 1. 生成多个查询\n",
    "      queries = self.generate_queries(query)\n",
    "      \n",
    "      # 2. 并行检索\n",
    "      all_results = []\n",
    "      for q in queries:\n",
    "        results = self.retriever.search(q, top_k=10)\n",
    "        all_results.append(results)\n",
    "      \n",
    "      # 3. RRF 融合\n",
    "      fused_results = self.reciprocal_rank_fusion(all_results)\n",
    "      \n",
    "      # 4. 获取完整文档内容\n",
    "      final_docs = []\n",
    "      for result in fused_results[:20]:  # 取前20个\n",
    "        doc = self.retriever.get_document(result['id'])\n",
    "        final_docs.append(doc)\n",
    "      \n",
    "      # 5. 重排序\n",
    "      reranked_docs = self.rerank_documents(query, final_docs)\n",
    "      \n",
    "      return reranked_docs[:10]  # 返回最终前10个文档"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
