"""
增强RAG系统
"""

import logging
import re
import json
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass
from enum import Enum
import hashlib

logger = logging.getLogger(__name__)


class QueryRewriteStrategy(Enum):
    """Query rewrite strategies supported by ``QueryRewriter``."""

    CANONICALIZATION = "canonicalization"  # normalize casual/vague phrasing into a standard question
    PARAPHRASING = "paraphrasing"  # synonym rewrites to widen embedding recall
    STEP_BACK = "step_back"  # generalize a specific question to broaden retrieval
    MULTI_QUERY = "multi_query"  # generate several queries from different angles
    DECOMPOSITION = "decomposition"  # split a complex query into sub-questions


from .common import RetrievalResult, QueryContext
from .rag_logger import get_rag_logger


class QueryRewriter:
    """LLM-backed query rewriter used to improve retrieval recall.

    Each strategy builds a (Chinese) prompt from the original query plus the
    conversation history and asks ``llm_client.invoke`` for rewritten
    queries.  All entry points are failure-safe: on an LLM error or an empty
    response they fall back to the original query, so callers always receive
    a non-empty list of usable query strings.
    """

    def __init__(self, llm_client):
        # llm_client must expose ``invoke(prompt: str) -> str``.
        self.llm_client = llm_client

    def rewrite_query(
        self, query: str, context: "QueryContext", strategy: "QueryRewriteStrategy"
    ) -> List[str]:
        """Rewrite ``query`` according to ``strategy``.

        Returns a non-empty list of query strings; an unknown strategy or
        any runtime failure yields ``[query]`` unchanged.
        """
        try:
            # Dispatch table keeps the strategy -> handler mapping in one place.
            handlers = {
                QueryRewriteStrategy.CANONICALIZATION: self._canonicalize_query,
                QueryRewriteStrategy.PARAPHRASING: self._paraphrase_query,
                QueryRewriteStrategy.STEP_BACK: self._step_back_query,
                QueryRewriteStrategy.MULTI_QUERY: self._multi_query_generation,
                QueryRewriteStrategy.DECOMPOSITION: self._decompose_query,
            }
            handler = handlers.get(strategy)
            return handler(query, context) if handler else [query]
        except Exception as e:
            logger.error(f"查询重写失败: {e}")
            return [query]

    @staticmethod
    def _response_lines(response: str) -> List[str]:
        """Split an LLM response into stripped, non-empty lines."""
        return [line.strip() for line in response.split("\n") if line.strip()]

    def _canonicalize_query(self, query: str, context: "QueryContext") -> List[str]:
        """Canonicalization: turn casual/vague phrasing into one standard, clear question."""
        prompt = f"""请将以下用户查询重写为更规范、清晰的问题表述：

原始查询：{query}

对话历史：
{context.conversation_history}

请提供1个规范化的问题，要求：
1. 使用标准的技术术语
2. 问题表述清晰明确
3. 保持原意不变
4. 适合向量检索
5. 输出只包含规范化后的问题，不要包含其他的引入或语句

规范化查询："""

        try:
            response = self.llm_client.invoke(prompt).strip()
            # Robustness fix: an empty LLM response previously produced [""];
            # fall back to the original query instead.
            return [response] if response else [query]
        except Exception as e:
            logger.error(f"规范化重写失败: {e}")
            return [query]

    def _paraphrase_query(self, query: str, context: "QueryContext") -> List[str]:
        """Paraphrasing: synonym rewrites to widen coverage and resist embedding misses."""
        prompt = f"""请为以下查询生成2-3个同义表达，提高检索覆盖率：

原始查询：{query}

对话历史：
{context.conversation_history}

请生成同义查询，要求：
1. 保持核心语义不变
2. 使用不同的表达方式
3. 包含关键词的同义词
4. 适合向量相似度搜索
5. 输出只包含规范化后的问题，不要包含其他的引入或语句

同义查询："""

        try:
            # One candidate per non-empty response line, capped at 3.
            queries = self._response_lines(self.llm_client.invoke(prompt))
            # Robustness fix: fall back to the original query on empty output
            # (previously this returned an empty list).
            return queries[:3] if queries else [query]
        except Exception as e:
            logger.error(f"同义改写失败: {e}")
            return [query]

    def _step_back_query(self, query: str, context: "QueryContext") -> List[str]:
        """Step-back: generalize a specific question to improve retrieval of complex topics."""
        prompt = f"""请将以下具体问题泛化为更广泛的问题，扩大检索范围：

原始查询：{query}

对话历史：
{context.conversation_history}

请提供1个泛化问题，要求：
1. 将具体问题抽象为更广泛的概念
2. 包含相关的背景信息
3. 扩大检索范围
4. 保持与原始问题的关联性
5. 输出只包含规范化后的问题，不要包含其他的引入或语句

泛化查询："""

        try:
            response = self.llm_client.invoke(prompt).strip()
            # Same empty-response fallback as _canonicalize_query.
            return [response] if response else [query]
        except Exception as e:
            logger.error(f"泛化重写失败: {e}")
            return [query]

    def _multi_query_generation(self, query: str, context: "QueryContext") -> List[str]:
        """Multi-query: several angles on the same question to raise recall."""
        prompt = f"""请为以下查询生成多个不同角度的查询，提高检索覆盖率：

原始查询：{query}

对话历史：
{context.conversation_history}

请生成3-4个不同角度的查询，要求：
1. 从不同角度表达同一问题
2. 包含不同的关键词组合
3. 覆盖问题的各个方面
4. 每个查询都适合向量检索
5. 输出只包含规范化后的问题，不要包含其他的引入或语句

多角度查询："""

        try:
            queries = self._response_lines(self.llm_client.invoke(prompt))
            # At most 4 angles; fall back to the original on empty output.
            return queries[:4] if queries else [query]
        except Exception as e:
            logger.error(f"多查询生成失败: {e}")
            return [query]

    def _decompose_query(self, query: str, context: "QueryContext") -> List[str]:
        """Decomposition: split a complex query into independently answerable sub-questions."""
        prompt = f"""请将以下复杂查询分解为多个简单的子问题：

原始查询：{query}

对话历史：
{context.conversation_history}

请分解为2-3个子问题，要求：
1. 每个子问题都是独立的、可回答的
2. 子问题之间逻辑相关
3. 覆盖原始查询的所有方面
4. 每个子问题都适合单独检索

子问题："""

        try:
            queries = self._response_lines(self.llm_client.invoke(prompt))
            # At most 3 sub-questions; fall back to the original on empty output.
            return queries[:3] if queries else [query]
        except Exception as e:
            logger.error(f"问题分解失败: {e}")
            return [query]


class ResultReranker:
    """Reranks retrieval hits by asking the LLM to grade each one (0-10)."""

    def __init__(self, llm_client):
        self.llm_client = llm_client

    def rerank_results(
        self, query: str, results: List[RetrievalResult], context: QueryContext
    ) -> List[RetrievalResult]:
        """Return ``results`` reordered best-first by LLM relevance grade.

        On any failure the input list is returned untouched.
        """
        if not results:
            return results

        try:
            rescored = self._score_results(query, results, context)
            return sorted(rescored, key=lambda item: item.score, reverse=True)
        except Exception as e:
            logger.error(f"结果重排失败: {e}")
            return results

    def _score_results(
        self, query: str, results: List[RetrievalResult], context: QueryContext
    ) -> List[RetrievalResult]:
        """Ask the LLM for a 0-10 relevance grade per result.

        A result whose grade cannot be parsed, or whose LLM call fails, is
        kept with its original score object unchanged.
        """
        rescored = []

        for idx, hit in enumerate(results, 1):
            prompt = f"""请对以下检索结果与查询的相关性进行评分（0-10分）：

查询：{query}

对话历史：
{context.conversation_history}

检索结果 {idx}：
{hit.content[:500]}...

评分标准：
- 10分：完全相关，直接回答问题
- 8-9分：高度相关，包含重要信息
- 6-7分：部分相关，有一定帮助
- 4-5分：略微相关，信息有限
- 0-3分：不相关或无关紧要

请只返回一个数字分数："""

            try:
                reply = self.llm_client.invoke(prompt)
                match = re.search(r"(\d+(?:\.\d+)?)", reply)
                if match is None:
                    # Grade not parseable -> keep the original result as-is.
                    rescored.append(hit)
                    continue
                # NOTE(review): the LLM grade is on a 0-10 scale while the
                # original retrieval score is typically 0-1, so results kept
                # on the fallback path mix scales in the final sort — confirm
                # this is intended.
                rescored.append(
                    RetrievalResult(
                        content=hit.content,
                        score=float(match.group(1)),
                        source=hit.source,
                        metadata=hit.metadata,
                    )
                )
            except Exception as e:
                logger.error(f"评分失败: {e}")
                rescored.append(hit)

        return rescored


class DynamicThresholdManager:
    """Decides whether retrieval hits pass a context-sensitive score cutoff.

    Addresses the "retrieval scores are too low" problem by relaxing the
    base threshold for long conversations and for follow-up / clarification
    turns.
    """

    def __init__(self, base_threshold: float = 0.5):
        self.base_threshold = base_threshold
        # Kept for backward compatibility; the effective threshold is
        # recomputed per call in _calculate_adjusted_threshold.
        self.adaptive_threshold = base_threshold

    def should_include_result(
        self, result: "RetrievalResult", context: "QueryContext"
    ) -> bool:
        """Return True when ``result`` should be kept for answer building."""
        # Fast path: the raw score already clears the base threshold.
        if result.score >= self.base_threshold:
            return True

        # Keep scores close to the context-adjusted threshold (80% of it).
        adjusted_threshold = self._calculate_adjusted_threshold(context)
        if result.score >= adjusted_threshold * 0.8:
            return True

        # BUG FIX: query_type values produced upstream carry a strategy
        # suffix (e.g. "follow_up 同义&泛化重写"), so the previous exact
        # "==" comparison never matched; use substring matching instead.
        if "follow_up" in (context.query_type or ""):
            return result.score >= self.base_threshold * 0.6

        return False

    def _calculate_adjusted_threshold(self, context: "QueryContext") -> float:
        """Compute the base threshold scaled by history length and query type."""
        threshold = self.base_threshold

        # Long conversations dilute relevance scores -> relax; very short
        # ones can afford to be stricter.
        history_length = len(context.conversation_history)
        if history_length > 2000:  # long conversation
            threshold *= 0.8
        elif history_length < 500:  # short conversation
            threshold *= 1.2

        # Follow-up / clarification turns tolerate weaker matches.  Substring
        # match because query_type may carry a strategy suffix (see above).
        query_type = context.query_type or ""
        if "follow_up" in query_type:
            threshold *= 0.7
        elif "clarification" in query_type:
            threshold *= 0.6

        return max(0.1, min(1.0, threshold))  # clamp to [0.1, 1.0]


class EnhancedRAGSystem:
    """Facade wiring query rewriting, hybrid retrieval, advanced reranking
    and dynamic threshold filtering into a single retrieval pipeline.
    """

    def __init__(self, vector_system, llm_client):
        self.vector_system = vector_system
        self.llm_client = llm_client

        # Core pipeline components.
        self.query_rewriter = QueryRewriter(llm_client)
        self.result_reranker = ResultReranker(llm_client)
        self.threshold_manager = DynamicThresholdManager()

        # Imported here (not at module level) — presumably to avoid import
        # cycles; TODO confirm before hoisting.
        from .data_integration import DataIntegrator
        from .hybrid_retrieval import HybridRetriever
        from .advanced_reranker import RerankManager

        # Data integrator: loads and merges the corpus once at construction.
        self.data_integrator = DataIntegrator()
        self.data_integrator.load_and_integrate_data()

        # Hybrid retriever: semantic + BM25 + metadata filtering.
        self.hybrid_retriever = HybridRetriever(
            vector_system=vector_system,
            llm_client=llm_client,
            data_integrator=self.data_integrator,
        )

        # Advanced (multi-signal) reranker.
        self.advanced_reranker = RerankManager(llm_client)

    def enhanced_retrieve(
        self, query: str, conversation_history: str, session_id: str, user_id: str
    ) -> Dict[str, Any]:
        """Run the full enhanced-retrieval pipeline (main entry point).

        Returns a dict with ``context`` (formatted evidence text),
        ``retrieval_stats`` and ``debug_info``.  On any failure it degrades
        to plain vector retrieval via ``_fallback_retrieve``.
        """
        # Per-query structured logger (writes a log file and a JSON trace).
        rag_logger = get_rag_logger(query)
        rag_logger.log_start(query, session_id, user_id)

        try:
            # 1. Build the query context.
            query_type = self._detect_query_type(query, conversation_history)
            context = QueryContext(
                original_query=query,
                rewritten_queries=[],
                conversation_history=conversation_history,
                session_id=session_id,
                user_id=user_id,
                query_type=query_type,
            )
            rag_logger.log_query_type_detection(query_type)

            # 2. Query rewriting.
            rewritten_queries = self._rewrite_queries(query, context)
            context.rewritten_queries = rewritten_queries

            strategy_name = self._get_strategy_name(query_type)
            rag_logger.log_query_rewrite(query, rewritten_queries, strategy_name)

            # 3. Hybrid retrieval (semantic + BM25 + metadata filtering).
            # BUG FIX: the original indexed rewritten_queries[1]
            # unconditionally, which raises IndexError whenever rewriting
            # produced no alternative (the list then holds only the original
            # query at index 0).
            best_query = (
                rewritten_queries[1]
                if len(rewritten_queries) > 1
                else rewritten_queries[0]
            )
            retrieval_query = best_query + conversation_history
            rag_logger.log_hybrid_retrieval_start(retrieval_query, top_k=20)
            hybrid_results = self.hybrid_retriever.retrieve(
                retrieval_query,
                top_k=20,
                rag_logger=rag_logger,
            )

            # 4. Advanced reranking (expects plain dicts, not result objects).
            rag_logger.log_reranking_start(len(hybrid_results))
            rerank_results = self.advanced_reranker.rerank_results(
                query,
                [
                    {"content": r.content, "score": r.score, "metadata": r.metadata}
                    for r in hybrid_results
                ],
                conversation_history,
                rag_logger=rag_logger,
            )

            # 5. Dynamic threshold filtering.
            before_count = len(rerank_results)
            filtered_results = self._filter_by_threshold(rerank_results, context)
            after_count = len(filtered_results)
            rag_logger.log_threshold_filter(
                before_count,
                after_count,
                self.threshold_manager.base_threshold,
                self.threshold_manager._calculate_adjusted_threshold(context),
            )

            # 6. Assemble the final evidence text for the answer prompt.
            final_context = self._build_final_context(filtered_results, context)

            retrieval_stats = {
                "total_retrieved": len(hybrid_results),
                "after_rerank": len(rerank_results),
                "after_filter": len(filtered_results),
                "queries_used": len(rewritten_queries),
                "hybrid_retrieval": True,
            }

            # Log the surviving results together with their score signals.
            final_results_for_log = [
                {
                    "content": r.content,
                    "final_score": getattr(r, "final_score", getattr(r, "score", 0)),
                    "semantic_score": getattr(r, "semantic_score", 0),
                    "keyword_coverage": getattr(r, "keyword_coverage", 0),
                    "contextual_relevance": getattr(r, "contextual_relevance", 0),
                }
                for r in filtered_results
            ]
            rag_logger.log_final_results(final_results_for_log, retrieval_stats)
            rag_logger.log_end(success=True)

            return {
                "context": final_context,
                "retrieval_stats": retrieval_stats,
                "debug_info": {
                    "original_query": query,
                    "rewritten_queries": rewritten_queries,
                    "query_type": context.query_type,
                    "hybrid_weights": self.hybrid_retriever.weights,
                    "log_file": str(rag_logger.log_file),
                    "json_file": str(rag_logger.json_file),
                },
            }

        except Exception as e:
            logger.error(f"增强检索失败: {e}")
            rag_logger.log_error("增强检索", e)
            rag_logger.log_end(success=False, error_msg=str(e))
            # Degrade gracefully to plain vector retrieval.
            return self._fallback_retrieve(query)

    def _get_strategy_name(self, query_type: str) -> str:
        """Return the human-readable rewrite-strategy label for logging."""
        # Substring matching: query_type may carry a strategy suffix
        # (see _detect_query_type).
        if "follow_up" in query_type:
            return "同义改写 + 泛化重写"
        elif "clarification" in query_type:
            return "规范化重写"
        else:
            return "多查询生成"

    def _detect_query_type(self, query: str, history: str) -> str:
        """Classify the query as general / follow-up / clarification.

        First turns (empty history) are always "general".  Otherwise a simple
        keyword scan decides; the returned string also embeds the rewrite
        strategy name (consumers therefore match by substring, not equality).
        """
        if not history.strip():
            return "general"

        # Keyword heuristics for multi-turn intent.
        follow_up_indicators = [
            "继续",
            "还有",
            "另外",
            "然后",
            "接下来",
            "具体",
            "详细",
        ]
        clarification_indicators = ["什么意思", "解释", "说明", "为什么", "如何"]

        query_lower = query.lower()

        if any(indicator in query_lower for indicator in follow_up_indicators):
            return "follow_up 同义&泛化重写"
        elif any(indicator in query_lower for indicator in clarification_indicators):
            return "clarification 规范化重写"
        else:
            return "general 多查询生成"

    def _rewrite_queries(self, query: str, context: "QueryContext") -> List[str]:
        """Pick and run the rewrite strategies matching the query type."""
        queries = [query]  # always keep the original query first

        # BUG FIX: _detect_query_type returns values with a strategy suffix
        # (e.g. "follow_up 同义&泛化重写"), so the previous exact "=="
        # comparisons never matched and every query fell through to
        # multi-query generation; match by substring instead.
        query_type = context.query_type or ""
        if "follow_up" in query_type:
            # Follow-ups: paraphrase + step-back generalization.
            queries.extend(
                self.query_rewriter.rewrite_query(
                    query, context, QueryRewriteStrategy.PARAPHRASING
                )
            )
            queries.extend(
                self.query_rewriter.rewrite_query(
                    query, context, QueryRewriteStrategy.STEP_BACK
                )
            )
        elif "clarification" in query_type:
            # Clarifications: canonicalize the phrasing.
            queries.extend(
                self.query_rewriter.rewrite_query(
                    query, context, QueryRewriteStrategy.CANONICALIZATION
                )
            )
        else:
            # General queries: multi-angle query generation.
            queries.extend(
                self.query_rewriter.rewrite_query(
                    query, context, QueryRewriteStrategy.MULTI_QUERY
                )
            )

        # De-duplicate preserving order; cap at 5 queries.
        return list(dict.fromkeys(queries))[:5]

    def _filter_by_threshold(self, results, context: "QueryContext"):
        """Keep only results that pass the dynamic score threshold."""
        filtered_results = []

        for result in results:
            # Prefer the advanced reranker's combined score when present.
            if hasattr(result, "final_score"):
                score = result.final_score
            else:
                score = result.score if hasattr(result, "score") else 0.5

            # Wrap in RetrievalResult so the threshold manager sees a uniform
            # interface regardless of the result's concrete type.
            if self.threshold_manager.should_include_result(
                RetrievalResult(content=result.content, score=score), context
            ):
                filtered_results.append(result)

        return filtered_results

    def _build_final_context(self, results, context: "QueryContext") -> str:
        """Format the filtered results into the evidence text block."""
        if not results:
            return ""

        context_parts = []

        # Header + one entry per surviving result.
        context_parts.append("【相关日志信息】")
        for i, result in enumerate(results, 1):
            if hasattr(result, "final_score"):
                # Advanced-reranker result: show all score components.
                score = result.final_score
                semantic_score = getattr(result, "semantic_score", 0)
                keyword_coverage = getattr(result, "keyword_coverage", 0)
                contextual_relevance = getattr(result, "contextual_relevance", 0)

                context_parts.append(
                    f"日志 {i} (综合分数: {score:.2f}, 语义: {semantic_score:.2f}, 关键词: {keyword_coverage:.2f}, 上下文: {contextual_relevance:.2f}):"
                )
            else:
                score = result.score if hasattr(result, "score") else 0.5
                context_parts.append(f"日志 {i} (相关度: {score:.2f}):")

            context_parts.append(result.content)
            context_parts.append("")  # blank line between entries

        return "\n".join(context_parts)

    def _fallback_retrieve(self, query: str) -> Dict[str, Any]:
        """Plain vector retrieval used when the enhanced pipeline fails."""
        try:
            # Basic top-5 vector search; results are plain dicts here.
            results = self.vector_system.retrieve_logs(query, top_k=5)

            context_parts = []
            if results:
                context_parts.append("【相关日志信息】")
                for i, result in enumerate(results, 1):
                    context_parts.append(f"日志 {i} (相关度: {result['score']:.2f}):")
                    context_parts.append(result["content"])
                    context_parts.append("")

            return {
                "context": "\n".join(context_parts),
                "retrieval_stats": {
                    "total_retrieved": len(results),
                    "after_rerank": len(results),
                    "after_filter": len(results),
                    "queries_used": 1,
                    "fallback": True,
                },
                "debug_info": {
                    "original_query": query,
                    "rewritten_queries": [query],
                    "query_type": "fallback",
                },
            }
        except Exception as e:
            logger.error(f"回退检索失败: {e}")
            # Last resort: empty but well-formed payload.
            return {
                "context": "",
                "retrieval_stats": {
                    "total_retrieved": 0,
                    "after_rerank": 0,
                    "after_filter": 0,
                    "queries_used": 0,
                    "fallback": True,
                },
                "debug_info": {
                    "original_query": query,
                    "rewritten_queries": [query],
                    "query_type": "fallback",
                },
            }
