# -*- coding: utf-8 -*-
"""
BGE Reranker模型实现
支持bge-reranker-base和bge-reranker-large
"""

from typing import List, Tuple, Optional
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from loguru import logger

from questionretrieval.core.base_reranker import BaseReranker
from ...config import get_rerank_model_config, DEFAULT_DEVICE


class BGEReranker(BaseReranker):
    """
    BGE cross-encoder reranker implementation.

    Scores (query, passage) pairs with a sequence-classification head and
    reorders candidate passages by relevance.

    Supported models:
    - BAAI/bge-reranker-base
    - BAAI/bge-reranker-large
    - BAAI/bge-reranker-v2-m3
    """

    # Short model alias -> HuggingFace Hub model id.
    SUPPORTED_MODELS = {
        'bge-reranker-base': 'BAAI/bge-reranker-base',
        'bge-reranker-large': 'BAAI/bge-reranker-large',
        'bge-reranker-v2-m3': 'BAAI/bge-reranker-v2-m3'
    }

    # Short model alias -> key understood by get_rerank_model_config().
    _CONFIG_KEYS = {
        'bge-reranker-base': 'bge_base',
        'bge-reranker-large': 'bge_large',
        'bge-reranker-v2-m3': 'bge_v2_m3'
    }

    def __init__(self, model_name: str = 'bge-reranker-base', device: Optional[str] = None,
                 max_length: Optional[int] = None, batch_size: Optional[int] = None, **kwargs):
        """
        Initialize the BGE reranker.

        Args:
            model_name: One of 'bge-reranker-base', 'bge-reranker-large',
                'bge-reranker-v2-m3'.
            device: Target device; falls back to the model config, then
                DEFAULT_DEVICE.
            max_length: Maximum tokenized sequence length; falls back to the
                model config, then 512. (The previous hard-coded default of
                512 made the config value unreachable.)
            batch_size: Scoring batch size; falls back to the model config,
                then 32.
            **kwargs: Extra configuration forwarded to BaseReranker.

        Raises:
            ValueError: If model_name is not a supported alias.
        """
        if model_name not in self.SUPPORTED_MODELS:
            raise ValueError(f"不支持的模型: {model_name}. 支持的模型: {list(self.SUPPORTED_MODELS.keys())}")

        super().__init__(model_name, **kwargs)

        # Single dict lookup replaces the former if/elif chain, which
        # re-validated model_name after the check above and had unreachable
        # branches for the full HF model paths.
        config = get_rerank_model_config('reranker', self._CONFIG_KEYS[model_name])

        self.model_path = self.SUPPORTED_MODELS[model_name]
        self.device = device or config.get('device', DEFAULT_DEVICE)
        # Explicit arguments win; otherwise the config value, then a default.
        self.max_length = max_length if max_length is not None else config.get('max_length', 512)
        self.batch_size = batch_size if batch_size is not None else config.get('batch_size', 32)

        # Populated lazily by load_model().
        self.tokenizer = None
        self.model = None

        logger.info(f"初始化BGE Reranker: {model_name}, 设备: {self.device}")

    def load_model(self) -> None:
        """
        Load the BGE tokenizer and model onto self.device (idempotent).

        Raises:
            Exception: Re-raises any transformers loading error after logging.
        """
        if self._is_loaded:
            logger.info(f"模型 {self.model_name} 已加载")
            return

        try:
            logger.info(f"正在加载BGE模型: {self.model_path}")

            self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)
            self.model = AutoModelForSequenceClassification.from_pretrained(self.model_path)

            # Move to the target device and disable train-time behavior
            # (dropout etc.) for inference.
            self.model.to(self.device)
            self.model.eval()

            self._is_loaded = True
            logger.info(f"BGE模型加载成功: {self.model_path}")

        except Exception as e:
            logger.error(f"加载BGE模型失败: {e}")
            raise

    def _compute_score(self, query: str, passage: str) -> float:
        """
        Score a single (query, passage) pair.

        Args:
            query: Query text.
            passage: Passage text.

        Returns:
            float: Sigmoid-normalized relevance score in [0, 1].
        """
        # Delegate to the batch path so single and batch scoring share the
        # same (correct) pair-encoding logic.
        return self._batch_compute_scores(query, [passage])[0]

    def _batch_compute_scores(self, query: str, passages: List[str]) -> List[float]:
        """
        Score every passage against the query, processing in batches.

        Args:
            query: Query text.
            passages: Passage texts.

        Returns:
            List[float]: Sigmoid-normalized scores aligned with `passages`.
        """
        scores: List[float] = []

        for start in range(0, len(passages), self.batch_size):
            batch = passages[start:start + self.batch_size]

            # BGE rerankers are cross-encoders: the tokenizer must receive
            # (query, passage) as a sentence PAIR so it inserts the model's
            # actual special tokens. The previous manual
            # f"{query} [SEP] {passage}" embedded a literal "[SEP]" substring
            # in a single sequence, which does not match the format these
            # models were trained on.
            inputs = self.tokenizer(
                [query] * len(batch),
                batch,
                return_tensors='pt',
                max_length=self.max_length,
                truncation=True,
                padding=True
            ).to(self.device)

            with torch.no_grad():
                logits = self.model(**inputs).logits
                batch_scores = torch.sigmoid(logits).cpu().numpy().flatten()
                scores.extend(batch_scores.tolist())

        return scores

    def rerank(self, query: str, candidates: List[str], top_k: Optional[int] = None) -> List[Tuple[str, float]]:
        """
        Rerank candidate passages by relevance to the query.

        Args:
            query: Query text.
            candidates: Candidate passage texts.
            top_k: If given, return only the top_k highest-scoring results.

        Returns:
            List[Tuple[str, float]]: (passage, score) pairs sorted by score
            descending.
        """
        if not self._is_loaded:
            self.load_model()  # lazy load on first use

        if not candidates:
            return []

        scores = self._batch_compute_scores(query, candidates)

        # Pair each passage with its score and sort best-first.
        doc_scores = sorted(zip(candidates, scores), key=lambda item: item[1], reverse=True)

        return doc_scores if top_k is None else doc_scores[:top_k]

    def get_model_info(self) -> dict:
        """
        Return the base model info extended with BGE-specific fields.
        """
        info = super().get_model_info()
        info.update({
            'model_path': self.model_path,
            'device': self.device,
            'max_length': self.max_length,
            'batch_size': self.batch_size,
            'model_type': 'BGE Reranker'
        })
        return info