import os
import torch
import numpy as np
from typing import List, Union, Tuple
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from utils.utils import timer
from config import LLM_DEVICE

# Disable HuggingFace tokenizers' internal thread pool; avoids deadlock
# warnings when tokenization happens after the process has forked.
os.environ["TOKENIZERS_PARALLELISM"] = "false"

class ReRankLLM:
    """Cross-encoder reranker backed by a HuggingFace sequence-classification model.

    Scores (query, document) pairs with a single-logit relevance head and
    returns the documents sorted by descending relevance. Inference runs in
    FP16 (with autocast) when the target device is CUDA, and in FP32 on CPU.
    """

    def __init__(
        self,
        model_path: str,
        max_length: int = 512,
        batch_size: int = 8,
        device: str = None,
    ):
        """
        Args:
            model_path: Local path or hub id of the reranker model.
            max_length: Maximum token length per (query, doc) pair; longer
                inputs are truncated.
            batch_size: Number of pairs scored per forward pass.
            device: Target device ("cpu", "cuda:0", ...); falls back to
                LLM_DEVICE from config when None.

        Raises:
            ValueError: if a CUDA device is requested but CUDA is unavailable.
            RuntimeError: if the model or tokenizer fails to load.
        """
        self.device = device or LLM_DEVICE
        self.max_length = max_length
        self.batch_size = batch_size

        # Validate the device BEFORE the expensive model load. The original
        # checked after .to(device), so an invalid CUDA device surfaced as a
        # confusing load failure instead of this explicit error.
        self._use_fp16 = "cuda" in self.device
        if self._use_fp16 and not torch.cuda.is_available():
            raise ValueError(f"CUDA is not available, but device is set to {self.device}")

        # Load tokenizer and model.
        try:
            self.tokenizer = AutoTokenizer.from_pretrained(model_path)
            self.model = AutoModelForSequenceClassification.from_pretrained(model_path)
            self.model.eval()
            if self._use_fp16:
                # FP16 only on GPU: half precision on CPU is unsupported or
                # extremely slow for many ops.
                self.model.half()
            self.model.to(self.device)
        except Exception as e:
            raise RuntimeError(f"Failed to load model from {model_path}: {e}") from e

    @timer
    def predict(
        self,
        query: str,
        docs: List,
        return_scores: bool = False,
    ) -> Union[List, Tuple[List, List[float]]]:
        """
        Rerank documents by relevance to the query, most relevant first.

        Args:
            query: Query text.
            docs: Documents to rank (each must expose a ``page_content``
                attribute).
            return_scores: If True, also return each document's score.

        Returns:
            The reranked document list, or ``(documents, scores)`` when
            ``return_scores`` is True. Empty input yields empty output.
        """
        if not docs:
            return ([], []) if return_scores else []

        # Build (query, doc) pairs for the cross-encoder.
        pairs = [(query, doc.page_content) for doc in docs]

        # Batched inference.
        all_scores: List[float] = []
        with torch.no_grad():
            for start in range(0, len(pairs), self.batch_size):
                batch = pairs[start : start + self.batch_size]
                inputs = self.tokenizer(
                    batch,
                    padding=True,
                    truncation=True,
                    return_tensors="pt",
                    max_length=self.max_length,
                ).to(self.device)

                if self._use_fp16:
                    # torch.cuda.amp.autocast is deprecated; torch.autocast is
                    # the supported replacement. Autocast only makes sense on
                    # CUDA here, so it is skipped entirely on CPU.
                    with torch.autocast(device_type="cuda"):
                        outputs = self.model(**inputs)
                else:
                    outputs = self.model(**inputs)

                # Reranker head emits one logit per pair; drop the trailing
                # dim to get a flat [batch] score vector. Convert to plain
                # Python floats so callers don't leak numpy scalar types.
                scores = outputs.logits.float().squeeze(-1)
                all_scores.extend(float(s) for s in scores.cpu().numpy())

        # Sort by score, highest first.
        order = np.argsort(all_scores)[::-1]
        ranked_docs = [docs[i] for i in order]
        ranked_scores = [all_scores[i] for i in order]

        if return_scores:
            return ranked_docs, ranked_scores

        return ranked_docs

    def __call__(self, query: str, docs: List, return_scores: bool = False):
        """Convenience alias for :meth:`predict`."""
        return self.predict(query, docs, return_scores)


# ======================
# Usage example
# ======================
if __name__ == "__main__":
    model_path = "BAAI/bge-reranker-large"

    try:
        reranker = ReRankLLM(
            model_path=model_path,
            max_length=512,
            batch_size=16,
            device="cuda:0",  # switch to "cpu" or "cuda:1" as needed
        )

        class Doc:
            # Minimal stand-in for a document object exposing page_content.
            def __init__(self, content):
                self.page_content = content

        sample_texts = [
            "人工智能是未来的发展方向",
            "Python 是一种编程语言",
            "北京是中国的首都",
            "机器学习是 AI 的子领域",
            "深度学习需要大量数据",
        ]
        docs = [Doc(text) for text in sample_texts]
        query = "什么是人工智能"

        # Run the reranker and collect per-document scores.
        ranked_docs, scores = reranker(query, docs, return_scores=True)

        print("Reranked Results:")
        for rank, (doc, score) in enumerate(zip(ranked_docs, scores), start=1):
            print(f"{rank}. [Score: {score:.4f}] {doc.page_content}")

    except Exception as e:
        print(f"Error during reranking: {e}")