"""
高级重排序:语义匹配度、关键信息覆盖度、上下文相关性
"""

import logging
import re
import math
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass
from collections import Counter

try:
    import jieba
except ImportError:
    jieba = None

logger = logging.getLogger(__name__)


@dataclass
class RerankResult:
    """A single reranked document together with its component scores.

    All scores are normalized to [0, 1]; ``final_score`` is the weighted
    blend of the three component signals and is used for ordering.
    """

    content: str
    semantic_score: float  # LLM-judged semantic match with the query
    keyword_coverage: float  # fraction of query keywords found in content
    contextual_relevance: float  # LLM-judged relevance given the dialog context
    final_score: float  # weighted combination of the three scores above
    # Optional[...] fixes the original `Dict[str, Any] = None` annotation
    # mismatch; a None default avoids a shared mutable dict between instances.
    metadata: Optional[Dict[str, Any]] = None

    def __post_init__(self):
        # Normalize a missing metadata mapping to a fresh empty dict so
        # callers can always treat it as a dict.
        if self.metadata is None:
            self.metadata = {}


class AdvancedReranker:
    """Advanced reranker.

    Scores each candidate document on three signals — semantic matching
    (LLM-judged), keyword coverage (token overlap), and contextual
    relevance (LLM-judged) — then blends them with ``self.weights`` and
    sorts descending.
    """

    # Common Chinese stop words dropped during keyword extraction.
    # Hoisted to a class constant so the set is built once, not per call.
    _STOP_WORDS = frozenset({
        "的", "了", "在", "是", "我", "有", "和", "就", "不", "人",
        "都", "一", "一个", "上", "也", "很", "到", "说", "要", "去",
        "你", "会", "着", "没有", "看", "好", "自己", "这", "那", "它",
        "他", "她", "我们", "你们", "他们", "这个", "那个",
    })

    def __init__(self, llm_client):
        """``llm_client`` must expose ``invoke(prompt: str) -> str``."""
        self.llm_client = llm_client

        # Blend weights for the three scoring signals (sum to 1.0).
        self.weights = {
            "semantic_matching": 0.4,  # semantic matching weight
            "keyword_coverage": 0.3,  # keyword coverage weight
            "contextual_relevance": 0.3,  # contextual relevance weight
        }

    def rerank(
        self, query: str, results: List[Dict], context: str = ""
    ) -> List[RerankResult]:
        """Score and sort ``results`` for ``query``, best first.

        Falls back to sorting by the original retrieval scores when any
        scoring stage raises.
        """
        if not results:
            return []

        try:
            # 1. Semantic matching (one LLM call per result).
            semantic_scores = self._calculate_semantic_matching(query, results)

            # 2. Keyword coverage (pure token overlap, no LLM call).
            keyword_scores = self._calculate_keyword_coverage(query, results)

            # 3. Contextual relevance (one LLM call per result).
            contextual_scores = self._calculate_contextual_relevance(
                query, results, context
            )

            # 4. Blend the three signals and sort descending.
            return self._merge_scores(
                results, semantic_scores, keyword_scores, contextual_scores
            )

        except Exception as e:
            logger.error("重排序失败: %s", e)
            # Degrade gracefully to a simple sort on the original scores.
            return self._fallback_rerank(results)

    def _score_with_llm(self, prompt: str, error_label: str) -> float:
        """Ask the LLM for a 0-10 score and normalize it to [0, 1].

        Shared by the semantic and contextual scorers (previously
        duplicated inline). Returns a neutral 0.5 when the call fails or
        no number can be parsed; clamps out-of-range model answers.
        """
        try:
            response = self.llm_client.invoke(prompt)
            score_match = re.search(r"(\d+(?:\.\d+)?)", response)
            if score_match:
                # Normalize to 0-1 and clamp, since the model may answer
                # outside the requested 0-10 range.
                return min(max(float(score_match.group(1)) / 10.0, 0.0), 1.0)
            return 0.5  # no parseable number -> neutral default
        except Exception as e:
            logger.error("%s: %s", error_label, e)
            return 0.5

    def _calculate_semantic_matching(
        self, query: str, results: List[Dict]
    ) -> List[float]:
        """LLM-judged semantic match score in [0, 1] for each result."""
        scores = []

        for result in results:
            content = result.get("content", "")

            # Only the first 500 chars are shown to keep the prompt small.
            prompt = f"""请评估以下文档与查询的语义匹配度（0-10分）：

查询：{query}

文档内容：{content[:500]}...

评分标准：
- 10分：完全匹配，直接回答问题
- 8-9分：高度相关，包含核心信息
- 6-7分：部分相关，有一定帮助
- 4-5分：略微相关，信息有限
- 0-3分：不相关或无关紧要

请只返回一个数字分数："""

            scores.append(self._score_with_llm(prompt, "语义匹配度计算失败"))

        return scores

    def _calculate_keyword_coverage(
        self, query: str, results: List[Dict]
    ) -> List[float]:
        """Keyword-overlap coverage score in [0, 1] for each result."""
        # Extract the query keywords once, outside the loop.
        query_keywords = self._extract_keywords(query)

        return [
            self._calculate_coverage_ratio(
                query_keywords, self._extract_keywords(result.get("content", ""))
            )
            for result in results
        ]

    def _calculate_contextual_relevance(
        self, query: str, results: List[Dict], context: str
    ) -> List[float]:
        """LLM-judged contextual relevance score in [0, 1] per result."""
        scores = []

        for result in results:
            content = result.get("content", "")

            # Context is truncated to 300 chars, content to 500.
            prompt = f"""请评估以下文档是否能直接回答用户问题（0-10分）：

用户问题：{query}

对话上下文：{context[:300]}...

文档内容：{content[:500]}...

评分标准：
- 10分：完全能回答问题，提供直接解决方案
- 8-9分：高度相关，包含重要信息
- 6-7分：部分相关，有一定帮助
- 4-5分：略微相关，信息有限
- 0-3分：不相关或无法回答问题

请只返回一个数字分数："""

            scores.append(self._score_with_llm(prompt, "上下文相关性计算失败"))

        return scores

    def _extract_keywords(self, text: str) -> List[str]:
        """Tokenize ``text``, dropping stop words and single-char tokens."""
        if jieba is None:
            # Without jieba, fall back to whitespace splitting (fine for
            # space-delimited text, weak for unsegmented Chinese).
            words = text.split()
        else:
            words = jieba.lcut(text)

        return [w for w in words if len(w) > 1 and w not in self._STOP_WORDS]

    def _calculate_coverage_ratio(
        self, query_keywords: List[str], content_keywords: List[str]
    ) -> float:
        """Fraction of query keywords that appear among content keywords."""
        if not query_keywords:
            return 0.0

        # Set membership is O(1) per lookup vs the original O(n) list scan.
        content_set = set(content_keywords)
        matched = sum(1 for kw in query_keywords if kw in content_set)

        # Ratio cannot exceed 1.0, but clamp defensively as before.
        return min(matched / len(query_keywords), 1.0)

    def _merge_scores(
        self,
        results: List[Dict],
        semantic_scores: List[float],
        keyword_scores: List[float],
        contextual_scores: List[float],
    ) -> List[RerankResult]:
        """Blend the three score lists and return results sorted best-first."""
        rerank_results = []

        for i, result in enumerate(results):
            # Weighted sum of the three normalized signals.
            final_score = (
                semantic_scores[i] * self.weights["semantic_matching"]
                + keyword_scores[i] * self.weights["keyword_coverage"]
                + contextual_scores[i] * self.weights["contextual_relevance"]
            )

            rerank_results.append(
                RerankResult(
                    content=result.get("content", ""),
                    semantic_score=semantic_scores[i],
                    keyword_coverage=keyword_scores[i],
                    contextual_relevance=contextual_scores[i],
                    final_score=final_score,
                    metadata=result.get("metadata", {}),
                )
            )

        # Highest final score first.
        rerank_results.sort(key=lambda x: x.final_score, reverse=True)

        return rerank_results

    def _fallback_rerank(self, results: List[Dict]) -> List[RerankResult]:
        """Fallback: wrap results and sort by their original scores only."""
        rerank_results = [
            RerankResult(
                content=result.get("content", ""),
                semantic_score=result.get("score", 0.0),
                keyword_coverage=0.5,  # neutral placeholder
                contextual_relevance=0.5,  # neutral placeholder
                final_score=result.get("score", 0.0),
                metadata=result.get("metadata", {}),
            )
            for result in results
        ]

        # Highest original score first.
        rerank_results.sort(key=lambda x: x.final_score, reverse=True)

        return rerank_results


class RerankManager:
    """Thin facade over ``AdvancedReranker`` that adds logging hooks."""

    def __init__(self, llm_client):
        """``llm_client`` is forwarded to the underlying AdvancedReranker."""
        self.llm_client = llm_client
        self.advanced_reranker = AdvancedReranker(llm_client)

    def rerank_results(
        self, query: str, results: List[Dict], context: str = "", rag_logger=None
    ) -> List[RerankResult]:
        """Rerank ``results`` for ``query`` (main entry point).

        When ``rag_logger`` is supplied, records per-signal average
        scores and the top-10 results. Falls back to sorting by the
        original retrieval scores on any failure.
        """
        try:
            rerank_results = self.advanced_reranker.rerank(query, results, context)

            # DEBUG level so routine stats do not clutter the terminal.
            logger.debug("重排序完成：%d个结果", len(rerank_results))

            # Record detailed reranking info when a RAG logger is provided.
            if rag_logger and rerank_results:
                # Average of each component signal across all results.
                count = len(rerank_results)
                semantic_avg = (
                    sum(r.semantic_score for r in rerank_results) / count
                )
                keyword_avg = (
                    sum(r.keyword_coverage for r in rerank_results) / count
                )
                contextual_avg = (
                    sum(r.contextual_relevance for r in rerank_results) / count
                )

                rag_logger.log_reranking_scores(
                    semantic_avg,
                    keyword_avg,
                    contextual_avg,
                    self.advanced_reranker.weights,
                )

                # Log the top-10 results with their per-signal scores.
                top_results = [
                    {
                        "content": r.content,
                        "final_score": r.final_score,
                        "semantic_score": r.semantic_score,
                        "keyword_coverage": r.keyword_coverage,
                        "contextual_relevance": r.contextual_relevance,
                    }
                    for r in rerank_results[:10]
                ]
                rag_logger.log_reranking_results(top_results)

            return rerank_results

        except Exception as e:
            logger.error("重排序失败: %s", e)
            if rag_logger:
                rag_logger.log_error("重排序", e)
            # Degrade gracefully to a simple sort on the original scores.
            return self._simple_rerank(results)

    def _simple_rerank(self, results: List[Dict]) -> List[RerankResult]:
        """Fallback: wrap results and sort by their original scores only."""
        fallback = [
            RerankResult(
                content=result.get("content", ""),
                semantic_score=result.get("score", 0.0),
                keyword_coverage=0.5,  # neutral placeholder
                contextual_relevance=0.5,  # neutral placeholder
                final_score=result.get("score", 0.0),
                metadata=result.get("metadata", {}),
            )
            for result in results
        ]

        # Highest original score first.
        fallback.sort(key=lambda x: x.final_score, reverse=True)

        return fallback
