import os
# Route Hugging Face Hub traffic through a mirror. This must happen before
# transformers / sentence_transformers are imported, because they read
# HF_ENDPOINT from the environment at import time.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

import numpy as np
import torch
import faiss
from sentence_transformers import SentenceTransformer
from transformers import AutoTokenizer, AutoModelForCausalLM
from typing import List, Tuple

class FaissRetriever:
  """Two-stage dense retriever: FAISS recall followed by LLM reranking.

  Stage 1 embeds the query with a SentenceTransformer model and searches a
  prebuilt FAISS index. Stage 2 scores each recalled document with a
  causal-LM reranker that is prompted to answer "yes"/"no" about
  query-document relevance; P("yes") is used as the relevance score.
  """

  def __init__(self,
      embedding_model_path: str = "Qwen/Qwen3-Embedding-0.6B",
      reranker_model_path: str = "Qwen/Qwen3-Reranker-0.6B",
      device: str = ("cuda" if torch.cuda.is_available()
                     else "mps" if torch.backends.mps.is_available()
                     else "cpu")):
    """
    Initialize the retriever.

    Args:
        embedding_model_path: path or HF id of the embedding model.
        reranker_model_path: path or HF id of the reranker model.
        device: compute device. Defaults to CUDA when available, then Apple
            MPS, then CPU. (The previous default fell back to "mps"
            unconditionally, which fails on machines without Metal.)
    """
    self.device = device

    # 1. Load the embedding model
    print("⏳ 正在加载Embedding模型...")
    self.embedding_model = SentenceTransformer(
        embedding_model_path,
        device=device
    )

    # 2. Load the reranker model. Left padding keeps the answer position at
    #    the last token for every sequence in a batch.
    print("⏳ 正在加载Reranker模型...")
    self.reranker_tokenizer = AutoTokenizer.from_pretrained(
        reranker_model_path,
        padding_side='left'
    )
    self.reranker_model = AutoModelForCausalLM.from_pretrained(
        reranker_model_path
    ).to(device).eval()

    # 3. Token ids of the two possible relevance answers
    self.token_false_id = self.reranker_tokenizer.convert_tokens_to_ids("no")
    self.token_true_id = self.reranker_tokenizer.convert_tokens_to_ids("yes")
    self.max_length = 8192

    # 4. Chat-template frame wrapped around every query-document pair
    self.prefix = "<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be \"yes\" or \"no\".<|im_end|>\n<|im_start|>user\n"
    self.suffix = "<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"
    self.prefix_tokens = self.reranker_tokenizer.encode(
        self.prefix,
        add_special_tokens=False
    )
    self.suffix_tokens = self.reranker_tokenizer.encode(
        self.suffix,
        add_special_tokens=False
    )

    # 5. FAISS index and corpus are populated later by load_index()
    self.faiss_index = None
    self.documents = []

    print("✅ 检索器初始化完成!")

  def load_index(self, index_path: str, docs_path: str) -> None:
    """
    Load a FAISS index and its document corpus from disk.

    Args:
        index_path: path of the serialized FAISS index.
        docs_path: path of a UTF-8 text file with one document per line.
    """
    self.faiss_index = faiss.read_index(index_path)
    print(f"📂 已加载FAISS索引: {index_path}")

    # One document per line; strip surrounding whitespace/newlines.
    with open(docs_path, 'r', encoding='utf-8') as f:
      self.documents = [line.strip() for line in f]
    print(f"📂 已加载文档: {docs_path}, 共 {len(self.documents)} 个文档")

  def retrieve_and_rerank(self,
      query: str,
      top_k_recall: int = 100,
      top_k_rerank: int = 5) -> List[Tuple[str, float]]:
    """
    Run the recall + rerank pipeline for a single query.

    Args:
        query: user query string.
        top_k_recall: number of candidates returned by the recall stage.
        top_k_rerank: number of documents returned after reranking.

    Returns:
        List of (document, rerank_score) pairs, best first. Empty when the
        recall stage finds nothing.

    Raises:
        ValueError: if load_index() has not been called yet.
    """
    if self.faiss_index is None or not self.documents:
      raise ValueError("请先加载FAISS索引和文档!")

    # Stage 1: recall by embedding similarity
    print("🔍 召回阶段...")
    query_embedding = self.embedding_model.encode(
        query,
        convert_to_tensor=True,
        device=self.device
    ).cpu().numpy().astype('float32').reshape(1, -1)
    faiss.normalize_L2(query_embedding)
    distances, indices = self.faiss_index.search(query_embedding, top_k_recall)
    # FAISS pads with id -1 when fewer than top_k_recall vectors exist.
    # Filter docs and scores TOGETHER so the two lists stay aligned (the
    # previous code filtered only the docs, mis-pairing recall scores
    # whenever a -1 appeared).
    hits = [(idx, score) for idx, score in zip(indices[0], distances[0])
            if idx != -1]
    recall_docs = [self.documents[idx] for idx, _ in hits]
    recall_scores = [score for _, score in hits]

    print(f"  召回完成: {len(recall_docs)} 个文档")

    if not recall_docs:
      # Nothing to rerank (e.g. empty index) — avoid tokenizing an empty batch.
      return []

    # Stage 2: rerank with the causal-LM scorer
    print("📊 重排阶段...")

    # One instruction/query/document prompt per candidate
    pairs = [
      f"<Instruct>: Given a web search query, retrieve relevant passages that answer the query\n" \
      f"<Query>: {query}\n" \
      f"<Document>: {doc}" \
      for doc in recall_docs
    ]

    # Tokenize, wrap in the chat template, pad to a batch
    inputs = self._process_reranker_inputs(pairs)

    # P("yes") per candidate
    rerank_scores = self._compute_rerank_scores(inputs)

    # Keep the recall score alongside for potential debugging/inspection
    combined_scores = [
      (recall_docs[i], rerank_scores[i], recall_scores[i])
      for i in range(len(recall_docs))
    ]

    # Order by reranker score, best first
    combined_scores.sort(key=lambda x: x[1], reverse=True)

    # Return the top K (document, rerank_score) pairs
    return [(doc, rerank_score) for doc, rerank_score, _ in
            combined_scores[:top_k_rerank]]

  def _process_reranker_inputs(self, pairs: List[str]) -> dict:
    """
    Tokenize query-document prompts and wrap them in the chat template.

    Args:
        pairs: formatted instruction/query/document strings.

    Returns:
        Dict of batched tensors on self.device, ready for the reranker.
    """
    # Tokenize without padding, reserving room for the prefix/suffix tokens
    inputs = self.reranker_tokenizer(
        pairs,
        padding=False,
        truncation='longest_first',
        return_attention_mask=False,
        max_length=self.max_length - len(self.prefix_tokens) - len(
          self.suffix_tokens)
    )

    # Frame every sequence with the precomputed template tokens
    for i, ele in enumerate(inputs['input_ids']):
      inputs['input_ids'][i] = self.prefix_tokens + ele + self.suffix_tokens

    # Left-pad (tokenizer was built with padding_side='left') into tensors
    inputs = self.reranker_tokenizer.pad(
        inputs,
        padding=True,
        return_tensors="pt",
        max_length=self.max_length
    )

    # Move the batch to the model's device
    for key in inputs:
      inputs[key] = inputs[key].to(self.device)

    return inputs

  def _compute_rerank_scores(self, inputs: dict) -> List[float]:
    """
    Score a tokenized batch with the reranker.

    Args:
        inputs: batched model inputs from _process_reranker_inputs().

    Returns:
        Probability of the "yes" answer for each sequence in the batch.
    """
    with torch.no_grad():
      outputs = self.reranker_model(**inputs)

      # Logits at the final position; left padding guarantees this is the
      # answer slot for every sequence in the batch.
      batch_scores = outputs.logits[:, -1, :]

      # Logits of the "yes" and "no" answer tokens
      true_vector = batch_scores[:, self.token_true_id]
      false_vector = batch_scores[:, self.token_false_id]

      # Normalize over the two answers only
      batch_scores = torch.stack([false_vector, true_vector], dim=1)
      batch_scores = torch.nn.functional.log_softmax(batch_scores, dim=1)

      # P("yes") as the relevance score
      scores = batch_scores[:, 1].exp().tolist()

    return scores


# ==================== 使用示例 ====================
def _run_demo() -> None:
  """Build a retriever, load a prebuilt index, and answer sample queries."""
  # Initialize the retriever on CPU
  retriever = FaissRetriever(
      embedding_model_path="Qwen/Qwen3-Embedding-0.6B",
      reranker_model_path="Qwen/Qwen3-Reranker-0.6B",
      device="cpu"
  )

  # Load the serialized FAISS index and its document corpus
  retriever.load_index("faiss_index.bin", "documents.txt")

  # Sample queries to run through the pipeline
  sample_queries = (
    "中国的首都是哪个城市？",
    "解释什么是量子纠缠",
    "巴黎有哪些著名景点？",
    "什么是相对论？"
  )

  for query in sample_queries:
    print(f"\n🔎 查询: '{query}'")
    results = retriever.retrieve_and_rerank(query, top_k_recall=5, top_k_rerank=3)

    print("📈 重排结果:")
    for pos, (doc, score) in enumerate(results):
      tail = '...' if len(doc) > 60 else ''
      print(f"  {pos + 1}. [分数: {score:.4f}] {doc[:60]}{tail}")


if __name__ == "__main__":
  _run_demo()