"""
重排器 - 对检索结果进行精确重排
使用Cross-Encoder模型提高结果质量
"""

import asyncio
from typing import List, Dict, Any, Optional
import torch
from sentence_transformers import CrossEncoder
import numpy as np

from ...core.interfaces import RerankingInterface, SearchResult
from ...core.events import EventEmitter
from ...core.config import config


class CrossEncoderReranker(RerankingInterface, EventEmitter):
    """Cross-Encoder based reranker.

    Scores (query, document) pairs jointly with a cross-encoder model,
    which is more accurate than bi-encoder similarity at the cost of speed.
    """

    def __init__(self):
        super().__init__()
        self.model = None        # lazily loaded CrossEncoder instance
        self.model_name = None   # resolved from config in initialize()
        self.device = None       # "cuda" when available, otherwise "cpu"
        self._initialized = False

    async def initialize(self):
        """Load the reranking model; idempotent (safe to call repeatedly)."""
        if self._initialized:
            return

        try:
            # Resolve model settings from configuration.
            reranking_config = config.get("lightrag_engine.reranking", {})
            self.model_name = reranking_config.get("model", "BAAI/bge-reranker-base")

            # Pick the best available device.
            self.device = "cuda" if torch.cuda.is_available() else "cpu"

            # Load the cross-encoder model (blocking; done once).
            self.model = CrossEncoder(self.model_name, device=self.device)

            self._initialized = True

            await self.emit("reranker_initialized", {
                "model": self.model_name,
                "device": self.device
            })

        except Exception as e:
            await self.emit_error("reranker_initialization", e)
            raise

    async def rerank(
        self, 
        query: str, 
        results: List[SearchResult], 
        top_k: int = 5
    ) -> List[SearchResult]:
        """Rerank search results with the cross-encoder.

        Args:
            query: The user query.
            results: Candidate results from first-stage retrieval.
            top_k: Maximum number of results to return.

        Returns:
            Up to ``top_k`` results ordered by cross-encoder score.
            Falls back to the original ordering when reranking fails.
        """
        if not self._initialized:
            await self.initialize()

        # Nothing to reorder with zero or one candidate.
        if len(results) <= 1:
            return results[:top_k]

        try:
            await self.emit("reranking_started", {
                "query": query[:100] + "..." if len(query) > 100 else query,
                "input_count": len(results),
                "target_count": top_k
            })

            # Build (query, document) input pairs, truncating long documents
            # to stay within the model's input limit.
            query_doc_pairs = []
            for result in results:
                doc_content = self._truncate_content(result.document.content, max_length=512)
                query_doc_pairs.append([query, doc_content])

            # Score all pairs in batches.
            rerank_scores = await self._compute_rerank_scores(query_doc_pairs)

            # Re-wrap each result with its new score.
            # NOTE(review): the Document objects are shared with the input
            # results, so the metadata writes below mutate the originals too.
            reranked_results = []
            for i, result in enumerate(results):
                new_result = SearchResult(
                    document=result.document,
                    score=rerank_scores[i],
                    relevance_type="reranked"
                )

                # Preserve pre-rerank information for downstream inspection.
                new_result.document.metadata["original_score"] = result.score
                new_result.document.metadata["original_relevance_type"] = result.relevance_type
                new_result.document.metadata["rerank_score"] = rerank_scores[i]

                reranked_results.append(new_result)

            # Order by the new scores, best first, then keep the top_k.
            reranked_results.sort(key=lambda x: x.score, reverse=True)
            final_results = reranked_results[:top_k]

            await self.emit("reranking_completed", {
                "query": query[:100] + "..." if len(query) > 100 else query,
                "input_count": len(results),
                "output_count": len(final_results),
                "score_change": self._calculate_score_changes(results, final_results)
            })

            return final_results

        except Exception as e:
            await self.emit_error("rerank", e)
            # Graceful degradation: return the original ordering.
            return results[:top_k]

    async def _compute_rerank_scores(self, query_doc_pairs: List[List[str]]) -> List[float]:
        """Compute cross-encoder scores for (query, document) pairs.

        Runs the CPU/GPU-bound prediction in the default executor so the
        event loop is not blocked.  Returns neutral constant scores on failure.
        """
        try:
            # get_running_loop() is the correct API inside a coroutine;
            # get_event_loop() is deprecated in this context.
            loop = asyncio.get_running_loop()

            def compute_scores():
                return self.model.predict(query_doc_pairs, batch_size=16)

            scores = await loop.run_in_executor(None, compute_scores)

            # Normalize numpy arrays / tensors to a plain Python list.
            if hasattr(scores, 'tolist'):
                scores = scores.tolist()

            return scores

        except Exception as e:
            await self.emit_error("compute_rerank_scores", e)
            # Degraded mode: identical neutral scores keep the original order.
            return [0.5] * len(query_doc_pairs)

    def _truncate_content(self, content: str, max_length: int = 512) -> str:
        """Truncate content to fit the model's input limit.

        Prefers cutting at Chinese sentence boundaries ('。'); falls back to
        a hard cut with an ellipsis when not even one sentence fits.
        """
        if len(content) <= max_length:
            return content

        # Try to cut on sentence boundaries.
        sentences = content.split('。')
        truncated = ""

        for sentence in sentences:
            if len(truncated + sentence + '。') <= max_length:
                truncated += sentence + '。'
            else:
                break

        if truncated:
            return truncated
        else:
            # No complete sentence fits: hard truncation.
            return content[:max_length] + "..."

    def _calculate_score_changes(self, original: List[SearchResult], reranked: List[SearchResult]) -> Dict[str, Any]:
        """Summarize how scores shifted between original and reranked lists.

        Returns mean/max/min/std of (new - old) per surviving document, or an
        error marker when the computation fails.
        """
        try:
            # Map document id -> pre-rerank score.
            original_scores = {result.document.id: result.score for result in original}

            changes = []
            for result in reranked:
                doc_id = result.document.id
                if doc_id in original_scores:
                    changes.append(result.score - original_scores[doc_id])

            if changes:
                # Cast numpy scalars to float so the payload stays serializable.
                return {
                    "mean_change": float(np.mean(changes)),
                    "max_change": max(changes),
                    "min_change": min(changes),
                    "std_change": float(np.std(changes))
                }
            else:
                return {"mean_change": 0, "max_change": 0, "min_change": 0, "std_change": 0}

        except Exception:
            return {"error": "Failed to calculate score changes"}


class LLMReranker(RerankingInterface, EventEmitter):
    """LLM-based reranker.

    Asks a chat model to reorder candidate documents by relevance and parses
    the returned ID list back into SearchResult objects.
    """

    def __init__(self):
        super().__init__()
        self.client = None  # AsyncOpenAI client, created in initialize()
        self._initialized = False

    async def initialize(self):
        """Create the LLM client; idempotent (safe to call repeatedly).

        Raises:
            ValueError: When no OpenAI API key is configured.
        """
        if self._initialized:
            return

        try:
            from openai import AsyncOpenAI

            api_key = config.get("external_services.openai.api_key")
            if not api_key:
                raise ValueError("OpenAI API key not found")

            self.client = AsyncOpenAI(api_key=api_key)
            self._initialized = True

            await self.emit("llm_reranker_initialized", {})

        except Exception as e:
            await self.emit_error("llm_reranker_initialization", e)
            raise

    async def rerank(
        self, 
        query: str, 
        results: List[SearchResult], 
        top_k: int = 5
    ) -> List[SearchResult]:
        """Rerank results by asking the LLM for a relevance ordering.

        Falls back to the original ordering (truncated to ``top_k``) when the
        LLM call or response parsing fails.
        """
        if not self._initialized:
            await self.initialize()

        # Nothing to reorder with zero or one candidate.
        if len(results) <= 1:
            return results[:top_k]

        try:
            await self.emit("llm_reranking_started", {
                "query": query[:100] + "..." if len(query) > 100 else query,
                "input_count": len(results),
                "target_count": top_k
            })

            # Build the reranking prompt.
            prompt = self._build_rerank_prompt(query, results)

            # Call the LLM; low temperature for a stable, deterministic order.
            response = await self.client.chat.completions.create(
                model="gpt-3.5-turbo",
                messages=[
                    {"role": "system", "content": "你是一个搜索结果排序专家。"},
                    {"role": "user", "content": prompt}
                ],
                temperature=0.1
            )

            # Parse the response back into SearchResult objects.
            reranked_results = self._parse_llm_response(response.choices[0].message.content, results)

            await self.emit("llm_reranking_completed", {
                "query": query[:100] + "..." if len(query) > 100 else query,
                "output_count": len(reranked_results)
            })

            return reranked_results[:top_k]

        except Exception as e:
            await self.emit_error("llm_rerank", e)
            return results[:top_k]

    def _build_rerank_prompt(self, query: str, results: List[SearchResult]) -> str:
        """Build the reranking prompt listing each candidate with a preview."""
        prompt = f"""请根据查询内容对以下搜索结果进行重新排序，按照相关性从高到低排列。

查询: {query}

搜索结果:
"""

        for i, result in enumerate(results):
            # Limit each document preview to 200 characters.
            content_preview = result.document.content[:200] + "..." if len(result.document.content) > 200 else result.document.content
            prompt += f"\n{i+1}. 文档ID: {result.document.id}\n内容: {content_preview}\n原始分数: {result.score:.3f}\n"

        prompt += """\n请返回重新排序后的文档ID列表，格式如下：
1. 文档ID1
2. 文档ID2
3. 文档ID3
...

只返回重新排序的列表，不需要解释。"""

        return prompt

    def _parse_llm_response(self, response: str, original_results: List[SearchResult]) -> List[SearchResult]:
        """Parse the LLM's ordered ID list back into SearchResult objects.

        Accepts both numbered lines ("1. doc_id") and bullet lines ("- doc_id").
        Duplicate IDs are kept only once.  Returns the original results when
        nothing can be parsed.
        """
        try:
            # Map document id -> original result for O(1) lookup.
            doc_map = {result.document.id: result for result in original_results}

            lines = response.strip().split('\n')
            reranked_results = []
            seen_ids = set()  # guard against the LLM repeating an ID

            for line in lines:
                line = line.strip()
                if not line:
                    continue

                # Extract the document ID from either supported line format.
                doc_id = None
                if line.startswith('-'):
                    # Bullet form: "- doc_id"
                    doc_id = line.lstrip('-').strip()
                elif '.' in line:
                    # Numbered form: "1. doc_id" (split once so IDs may contain dots)
                    parts = line.split('.', 1)
                    doc_id = parts[1].strip()

                if doc_id and doc_id in doc_map and doc_id not in seen_ids:
                    seen_ids.add(doc_id)
                    result = doc_map[doc_id]
                    # Assign a decreasing synthetic score reflecting LLM rank.
                    new_score = 1.0 - (len(reranked_results) * 0.1)
                    new_result = SearchResult(
                        document=result.document,
                        score=new_score,
                        relevance_type="llm_reranked"
                    )
                    reranked_results.append(new_result)

            # Parsing produced nothing usable: keep the original ordering.
            if not reranked_results:
                return original_results

            return reranked_results

        except Exception as e:
            # Synchronous context (not async) - cannot use emit_error here.
            print(f"Error in parse_llm_response: {e}")
            return original_results


class HybridReranker(RerankingInterface, EventEmitter):
    """Hybrid reranker chaining multiple reranking strategies.

    Stage one is always a cross-encoder pass; stage two is an optional
    LLM-based refinement used only when requested and available.
    """

    def __init__(self):
        super().__init__()
        self.cross_encoder_reranker = CrossEncoderReranker()
        self.llm_reranker = None  # optional second stage, set in initialize()
        self._initialized = False

    async def initialize(self):
        """Set up both stages; the LLM stage is best-effort."""
        if self._initialized:
            return

        try:
            # The cross-encoder stage is mandatory.
            await self.cross_encoder_reranker.initialize()

            # The LLM stage is optional: degrade silently when it cannot be
            # initialized (e.g. missing API key) and run cross-encoder only.
            try:
                self.llm_reranker = LLMReranker()
                await self.llm_reranker.initialize()
            except Exception:
                self.llm_reranker = None

            self._initialized = True

            await self.emit("hybrid_reranker_initialized", {
                "cross_encoder_available": True,
                "llm_reranker_available": self.llm_reranker is not None
            })

        except Exception as e:
            await self.emit_error("hybrid_reranker_initialization", e)
            raise

    async def rerank(
        self, 
        query: str, 
        results: List[SearchResult], 
        top_k: int = 5,
        use_llm: bool = False
    ) -> List[SearchResult]:
        """Run the hybrid reranking pipeline.

        Args:
            query: The user query.
            results: Candidate results to reorder.
            top_k: Maximum number of results to return.
            use_llm: Whether to apply the optional LLM refinement stage.

        Returns:
            Up to ``top_k`` reranked results; falls back to the original
            ordering when the pipeline fails.
        """
        if not self._initialized:
            await self.initialize()

        try:
            # Stage 1: cross-encoder over a wider pool (2x top_k) so the
            # optional second stage has candidates to choose from.
            stage_one = await self.cross_encoder_reranker.rerank(query, results, top_k * 2)

            # Stage 2 (optional): LLM refinement when requested and available.
            llm_applicable = use_llm and self.llm_reranker
            if llm_applicable and len(stage_one) > 1:
                final_results = await self.llm_reranker.rerank(query, stage_one, top_k)
            else:
                final_results = stage_one[:top_k]

            display_query = query[:100] + "..." if len(query) > 100 else query
            stages = ["cross_encoder"]
            if llm_applicable:
                stages.append("llm")

            await self.emit("hybrid_reranking_completed", {
                "query": display_query,
                "stages_used": stages,
                "final_count": len(final_results)
            })

            return final_results

        except Exception as e:
            await self.emit_error("hybrid_rerank", e)
            return results[:top_k]