import os
# Route Hugging Face Hub downloads through the hf-mirror.com mirror.
# Must be set before the transformers / sentence_transformers imports below,
# since those libraries read HF_ENDPOINT at import/download time.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"

import numpy as np
import torch
from sentence_transformers import SentenceTransformer, util
from transformers import AutoTokenizer, AutoModelForCausalLM
from typing import List, Tuple
import faiss



class EmbeddingRerankSystem:
  """Two-stage retrieval pipeline: embedding recall (FAISS) + LLM reranking.

  Stage 1 encodes documents with a SentenceTransformer model and recalls
  candidates by inner-product search over L2-normalized vectors (i.e.
  cosine similarity). Stage 2 scores each (query, document) pair with a
  causal-LM reranker whose last-token "yes"/"no" logits are converted to
  a relevance probability.
  """

  def __init__(self,
      embedding_model_path: str = "Qwen/Qwen3-Embedding-0.6B",
      reranker_model_path: str = "Qwen/Qwen3-Reranker-0.6B",
      device: str = ("cuda" if torch.cuda.is_available()
                     else "mps" if torch.backends.mps.is_available()
                     else "cpu")):
    """
    Load the embedding and reranker models.

    Args:
        embedding_model_path: Embedding model path (local or HuggingFace Hub).
        reranker_model_path: Reranker model path (local or HuggingFace Hub).
        device: Compute device (cuda/mps/cpu). The default now falls back to
            "cpu" when neither CUDA nor MPS is available; the previous
            default of "mps" failed outright on such machines.
    """
    self.device = device
    # Filled in by index_documents(); initialized so that calling
    # retrieve_and_rerank() before indexing yields an empty result path
    # instead of an AttributeError.
    self.documents: List[str] = []

    # 1. Load the embedding model.
    print("⏳ 正在加载Embedding模型...")
    self.embedding_model = SentenceTransformer(
        embedding_model_path,
        device=device
    )

    # 2. Load the reranker. Left padding keeps every sequence's final token
    # at the same (last) position, where _compute_rerank_scores reads logits.
    print("⏳ 正在加载Reranker模型...")
    self.reranker_tokenizer = AutoTokenizer.from_pretrained(
        reranker_model_path,
        padding_side='left'
    )
    self.reranker_model = AutoModelForCausalLM.from_pretrained(
        reranker_model_path
    ).to(device).eval()

    # 3. Token ids of the two permitted answers ("yes"/"no").
    self.token_false_id = self.reranker_tokenizer.convert_tokens_to_ids("no")
    self.token_true_id = self.reranker_tokenizer.convert_tokens_to_ids("yes")
    self.max_length = 8192

    # 4. Chat-template prefix/suffix wrapped around every query-document
    # pair; pre-tokenized once so the hot path only tokenizes the pair text.
    self.prefix = "<|im_start|>system\nJudge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be \"yes\" or \"no\".<|im_end|>\n<|im_start|>user\n"
    self.suffix = "<|im_end|>\n<|im_start|>assistant\n<think>\n\n</think>\n\n"
    self.prefix_tokens = self.reranker_tokenizer.encode(
        self.prefix,
        add_special_tokens=False
    )
    self.suffix_tokens = self.reranker_tokenizer.encode(
        self.suffix,
        add_special_tokens=False
    )

    # 5. Inner-product index; vectors are L2-normalized before add/search,
    # so inner product equals cosine similarity.
    self.dim = self.embedding_model.get_sentence_embedding_dimension()
    self.faiss_index = faiss.IndexFlatIP(self.dim)

    print("✅ 系统初始化完成!")

  def index_documents(self, documents: List[str]) -> None:
    """
    Build (or rebuild) the vector index for a document collection.

    Args:
        documents: List of document strings.
    """
    print(f"📚 正在为 {len(documents)} 个文档创建索引...")
    self.documents = documents
    embeddings = self.embedding_model.encode(
        documents,
        convert_to_tensor=True,
        device=self.device
    ).cpu().numpy().astype('float32')
    faiss.normalize_L2(embeddings)
    # Reset before adding: self.documents is replaced above, so appending to
    # a non-empty index on a second call would leave stale vectors whose ids
    # no longer match (or exceed the bounds of) the new document list.
    self.faiss_index.reset()
    self.faiss_index.add(embeddings)
    print("✅ 文档索引创建完成!")

  def retrieve_and_rerank(self,
      query: str,
      top_k_recall: int = 100,
      top_k_rerank: int = 5,
      batch_size: int = 32) -> List[Tuple[str, float]]:
    """
    Run the recall + rerank pipeline for a single query.

    Args:
        query: User query string.
        top_k_recall: Number of candidates returned by the recall stage.
        top_k_rerank: Number of documents returned after reranking.
        batch_size: Rerank batch size, used to bound memory usage.

    Returns:
        Reranked documents as (document text, relevance score) pairs,
        best first.
    """
    # Stage 1: recall by embedding similarity.
    print("🔍 召回阶段...")
    query_embedding = self.embedding_model.encode(
        query,
        prompt_name="query",
        convert_to_tensor=True,
        device=self.device
    ).cpu().numpy().astype('float32').reshape(1, -1)
    faiss.normalize_L2(query_embedding)
    # Already (1, dim); the previous extra reshape here was redundant.
    distances, indices = self.faiss_index.search(query_embedding, top_k_recall)

    # FAISS pads results with id -1 when fewer than top_k_recall vectors
    # exist. Filter ids and scores *together* so they stay aligned — the old
    # code filtered only the documents, which misaligned recall scores
    # whenever -1 entries were present.
    hits = [(int(idx), float(score))
            for idx, score in zip(indices[0], distances[0]) if idx != -1]
    recall_docs = [self.documents[idx] for idx, _ in hits]
    recall_scores = [score for _, score in hits]

    print(f"  召回完成: {len(recall_docs)} 个文档")

    # Stage 2: rerank with the causal-LM scorer, processed in batches.
    print(f"📊 重排阶段... 共 {len(recall_docs)} 个文档，批次大小: {batch_size}")

    all_rerank_scores = []
    total_docs = len(recall_docs)

    for i in range(0, total_docs, batch_size):
      end = min(i + batch_size, total_docs)
      batch_docs = recall_docs[i:end]
      batch_recall_scores = recall_scores[i:end]
      print(f"  处理批次 {i//batch_size + 1}/{(total_docs + batch_size - 1)//batch_size}，文档范围: {i+1}-{end}/{total_docs}")

      # Instruction-formatted (query, document) pairs for the reranker.
      pairs = [
        f"<Instruct>: Given a web search query, retrieve relevant passages that answer the query\n"
        f"<Query>: {query}\n"
        f"<Document>: {doc}"
        for doc in batch_docs
      ]

      inputs = self._process_reranker_inputs(pairs)
      batch_rerank_scores = self._compute_rerank_scores(inputs)

      # Keep (doc, rerank score, recall score) triples; the recall score is
      # retained for sorting-stage inspection but not returned to callers.
      all_rerank_scores.extend(
        (batch_docs[j], batch_rerank_scores[j], batch_recall_scores[j])
        for j in range(len(batch_docs))
      )

    # Sort by rerank score, best first.
    all_rerank_scores.sort(key=lambda x: x[1], reverse=True)

    return [(doc, rerank_score) for doc, rerank_score, _ in
            all_rerank_scores[:top_k_rerank]]

  def _process_reranker_inputs(self, pairs: List[str]) -> dict:
    """
    Tokenize query-document pairs for the reranker.

    Each pair is truncated to leave room for the fixed prefix/suffix token
    sequences, wrapped with them, padded to a uniform length, and moved to
    the target device.

    Args:
        pairs: Instruction-formatted query-document pair strings.

    Returns:
        Tensor dict suitable for `self.reranker_model(**inputs)`.
    """
    # Tokenize without padding first; truncation budget excludes the
    # prefix/suffix tokens added below so the total stays <= max_length.
    inputs = self.reranker_tokenizer(
        pairs,
        padding=False,
        truncation='longest_first',
        return_attention_mask=False,
        max_length=self.max_length - len(self.prefix_tokens) - len(
          self.suffix_tokens)
    )

    # Wrap every sequence with the chat-template prefix/suffix tokens.
    for i, ele in enumerate(inputs['input_ids']):
      inputs['input_ids'][i] = self.prefix_tokens + ele + self.suffix_tokens

    # Pad the batch to a uniform length (left-padded per tokenizer config).
    inputs = self.reranker_tokenizer.pad(
        inputs,
        padding=True,
        return_tensors="pt",
        max_length=self.max_length
    )

    # Move all tensors to the compute device.
    for key in inputs:
      inputs[key] = inputs[key].to(self.device)

    return inputs

  def _compute_rerank_scores(self, inputs: dict) -> List[float]:
    """
    Score a tokenized batch with the reranker model.

    Args:
        inputs: Tensor dict from `_process_reranker_inputs`.

    Returns:
        One relevance score per pair: P("yes") from a softmax restricted to
        the "no"/"yes" logits at the final token position.
    """
    with torch.no_grad():
      outputs = self.reranker_model(**inputs)

      # Logits at the last position (left padding puts the answer slot here).
      batch_scores = outputs.logits[:, -1, :]

      # Extract the "yes" and "no" logits.
      true_vector = batch_scores[:, self.token_true_id]
      false_vector = batch_scores[:, self.token_false_id]

      # Two-way softmax over [no, yes] in log space for numerical stability.
      batch_scores = torch.stack([false_vector, true_vector], dim=1)
      batch_scores = torch.nn.functional.log_softmax(batch_scores, dim=1)

      # Probability assigned to "yes".
      scores = batch_scores[:, 1].exp().tolist()

    return scores


# ==================== 使用示例 ====================
if __name__ == "__main__":
  # 1. 初始化系统
  system = EmbeddingRerankSystem(
      embedding_model_path="Qwen/Qwen3-Embedding-0.6B",
      reranker_model_path="Qwen/Qwen3-Reranker-0.6B",
      device="cpu"
  )

  # 2. 准备文档库
  documents = [
    "北京是中国的首都，位于华北平原的北部。",
    "巴黎是法国的首都，也是欧洲重要的文化和商业中心。",
    "重力是物体之间相互吸引的力，由艾萨克·牛顿首次系统描述。",
    "量子力学是描述微观粒子行为的物理学理论。",
    "长城是中国古代的军事防御工程，是世界文化遗产。",
    "埃菲尔铁塔位于法国巴黎，是世界上最著名的建筑之一。",
    "黑洞是时空曲率大到连光都无法逃脱的天体。",
    "故宫位于北京，是中国明清两代的皇家宫殿。",
    "相对论是爱因斯坦提出的关于时空和引力的理论。",
    "亚马逊雨林是世界上最大的热带雨林，位于南美洲。",
    "大熊猫是中国的国宝，主要生活在四川的山区。",
    "人工智能是计算机科学的一个分支，旨在创造智能机器。",
    "长城始建于春秋战国时期，用于防御北方游牧民族。",
    "量子纠缠是量子力学中的一种现象，两个粒子无论距离多远都能瞬间影响对方。",
    "卢浮宫位于巴黎，是世界上最大的艺术博物馆之一。"
  ]

  # 3. 创建文档索引
  system.index_documents(documents)

  # 4. 执行查询
  queries = [
    "中国的首都是哪个城市？",
    "解释什么是量子纠缠",
    "巴黎有哪些著名景点？",
    "什么是相对论？"
  ]

  for query in queries:
    print(f"\n🔎 查询: '{query}'")
    results = system.retrieve_and_rerank(query, top_k_recall=5, top_k_rerank=3)

    print("📈 重排结果:")
    for i, (doc, score) in enumerate(results):
      print(
        f"  {i + 1}. [分数: {score:.4f}] {doc[:60]}{'...' if len(doc) > 60 else ''}")