"""
混合检索系统：语义检索 + BM25关键词检索 + 元数据过滤
"""

import logging
import re
import math
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass
from collections import Counter, defaultdict

try:
    import jieba
except ImportError:
    jieba = None
import hashlib

logger = logging.getLogger(__name__)


from .common import RetrievalResult


class BM25Retriever:
    """Okapi BM25 keyword retriever over an in-memory corpus.

    Call ``fit(documents)`` once to build the index, then ``search(query)``
    per query.  Tokenization uses jieba when available and falls back to
    whitespace splitting otherwise.
    """

    # High-frequency Chinese function words dropped during tokenization.
    # Hoisted to a class constant so the set is built once, not per call.
    _STOP_WORDS = frozenset({
        "的", "了", "在", "是", "我", "有", "和", "就", "不", "人",
        "都", "一", "一个", "上", "也", "很", "到", "说", "要", "去",
        "你", "会", "着", "没有", "看", "好", "自己", "这",
    })

    def __init__(self, k1: float = 1.2, b: float = 0.75):
        """
        Args:
            k1: term-frequency saturation parameter.
            b: document-length normalization parameter (0 = none, 1 = full).
        """
        self.k1 = k1
        self.b = b
        self.documents: List[str] = []
        self.doc_tokens: List[List[str]] = []  # tokenized documents, built by fit()
        self.doc_freqs: List[Counter] = []     # per-document term frequencies
        self.idf: Dict[str, float] = {}
        self.doc_len: List[int] = []           # token count per document
        self.avgdl: float = 0.0                # average document length (in tokens)

    def fit(self, documents: List[str]) -> None:
        """Build the BM25 index: tokens, lengths, document frequencies, IDF."""
        self.documents = documents
        # Tokenize the corpus once up front so search() does not have to
        # re-tokenize every document on every query.
        self.doc_tokens = [self._tokenize(doc) for doc in documents]
        self.doc_freqs = [Counter(tokens) for tokens in self.doc_tokens]
        # Measure length on the *tokenized* form so length normalization in
        # _bm25_score uses the same unit as term counting (the previous raw
        # whitespace-split length disagreed with the token-based scoring).
        self.doc_len = [len(tokens) for tokens in self.doc_tokens]
        self.avgdl = sum(self.doc_len) / len(self.doc_len) if self.doc_len else 0.0

        # Document frequency: number of documents containing each term.
        df: Dict[str, int] = defaultdict(int)
        for tokens in self.doc_tokens:
            for word in set(tokens):
                df[word] += 1

        # Robertson IDF: log((N - df + 0.5) / (df + 0.5)).
        N = len(documents)
        self.idf = {
            word: math.log(N - freq + 0.5) - math.log(freq + 0.5)
            for word, freq in df.items()
        }

    def _tokenize(self, text: str) -> List[str]:
        """Tokenize *text*, dropping stop words and single-character tokens."""
        if jieba is None:
            # Fallback when jieba is not installed: simple whitespace split.
            words = text.split()
        else:
            words = jieba.lcut(text)
        return [w for w in words if len(w) > 1 and w not in self._STOP_WORDS]

    def search(self, query: str, top_k: int = 10) -> List[RetrievalResult]:
        """Return the *top_k* documents ranked by BM25 score for *query*."""
        if not self.documents:
            return []

        query_words = self._tokenize(query)
        scored: List[Tuple[float, str]] = []
        for idx, doc in enumerate(self.documents):
            # Reuse the tokens computed in fit() instead of re-tokenizing.
            score = self._bm25_score(query_words, self.doc_tokens[idx], idx)
            scored.append((score, doc))

        scored.sort(key=lambda pair: pair[0], reverse=True)
        return [
            RetrievalResult(
                content=content, score=score, source="bm25", retrieval_type="bm25"
            )
            for score, content in scored[:top_k]
        ]

    def _bm25_score(
        self, query_words: List[str], doc_words: List[str], doc_idx: int
    ) -> float:
        """Compute the BM25 score of one document against the query terms.

        Args:
            query_words: tokenized query.
            doc_words: tokenized document (used for term frequencies).
            doc_idx: index into ``self.doc_len`` for length normalization.
        """
        doc_len = self.doc_len[doc_idx]
        # Guard against division by zero when every indexed document is empty.
        avgdl = self.avgdl if self.avgdl else 1.0
        # One Counter pass replaces the repeated `in` + `.count()` list scans.
        term_freqs = Counter(doc_words)
        length_norm = self.k1 * (1 - self.b + self.b * doc_len / avgdl)

        score = 0.0
        for word in query_words:
            tf = term_freqs[word]
            if tf == 0:
                continue
            idf = self.idf.get(word, 0)
            score += idf * (tf * (self.k1 + 1)) / (tf + length_norm)
        return score


class MetadataFilter:
    """Extracts metadata tags from queries and filters retrieval results.

    The keyword vocabularies below were derived from the project's CSV data.
    Matching is case-insensitive: both the text and each keyword are
    lowercased before comparison.
    """

    def __init__(self, data_integrator=None):
        self.data_integrator = data_integrator

        # Service-name vocabulary extracted from the CSV data.
        self.service_tags = {
            "AuthService": ["认证", "登录", "token", "jwt", "权限", "用户验证", "认证服务"],
            "OrderService": ["订单", "下单", "支付", "交易", "购买", "订单服务"],
            "PaymentService": ["支付", "付款", "退款", "账单", "金额", "支付服务"],
            "UserService": ["用户", "注册", "个人信息", "账户", "用户服务"],
            "Database": ["数据库", "连接", "查询", "事务", "死锁", "mysql", "数据库连接"],
            "Redis": ["缓存", "redis", "内存", "存储", "缓存服务"],
            "GatewayService": ["网关", "路由", "负载均衡", "限流", "网关服务"],
            "NotificationService": ["通知", "消息", "邮件", "短信", "通知服务"],
            "StockService": ["库存", "库存管理", "库存服务"],
            "SearchService": ["搜索", "全文检索", "搜索服务"],
            "FileService": ["文件", "文件上传", "文件服务"],
            "LogService": ["日志", "日志服务"],
            "MonitorService": ["监控", "监控服务"],
            "ConfigService": ["配置", "配置服务"],
        }

        # Log-level vocabulary.
        self.error_levels = {
            "ERROR": ["错误", "异常", "失败", "error", "exception", "ERROR"],
            "WARN": ["警告", "warn", "warning", "WARN"],
            "FATAL": ["致命", "严重", "fatal", "critical", "FATAL"],
            "INFO": ["信息", "info", "日志", "INFO"],
        }

        # Component vocabulary.
        self.components = {
            "TokenValidator": ["token验证", "jwt验证", "签名验证", "TokenValidator"],
            "DBConnector": ["数据库连接", "连接池", "连接管理", "DBConnector"],
            "CacheManager": ["缓存管理", "缓存操作", "CacheManager"],
            "LoadBalancer": ["负载均衡", "流量分发", "LoadBalancer"],
            "CircuitBreaker": ["熔断器", "熔断保护", "CircuitBreaker"],
            "RateLimitService": ["限流", "流量控制", "RateLimitService"],
            "SessionService": ["会话", "会话管理", "SessionService"],
        }

        # Fault-type vocabulary.
        self.fault_types = {
            "数据库问题": ["数据库", "连接池", "死锁", "慢查询", "数据库连接"],
            "网络问题": ["网络", "超时", "连接失败", "网络延迟"],
            "内存问题": ["内存", "OOM", "内存泄漏", "GC", "堆内存"],
            "配置问题": ["配置", "参数", "设置", "配置错误"],
            "服务问题": ["服务", "微服务", "服务调用", "服务发现"],
        }

        # Severity vocabulary.
        self.severity_levels = {
            "高": ["高", "严重", "紧急", "critical", "fatal"],
            "中": ["中", "中等", "medium", "warning"],
            "低": ["低", "轻微", "low", "info"],
        }

    def _category_specs(self) -> List[Tuple[str, Dict[str, List[str]], float]]:
        """(metadata field, label -> keywords mapping, filter weight) triples.

        Shared by extract_metadata and filter_by_metadata so both always
        iterate the same categories with consistent weights.
        """
        return [
            ("services", self.service_tags, 0.3),
            ("error_levels", self.error_levels, 0.2),
            ("components", self.components, 0.2),
            ("fault_types", self.fault_types, 0.2),
            ("severity_levels", self.severity_levels, 0.1),
        ]

    @staticmethod
    def _matches(text_lower: str, keywords: List[str]) -> bool:
        """True if any keyword occurs in the pre-lowercased text.

        The vocabularies mix cases ("ERROR", "TokenValidator", "OOM") while
        the text is lowercased, so each keyword must be lowercased too —
        otherwise the upper-case entries could never match (the original bug).
        """
        return any(keyword.lower() in text_lower for keyword in keywords)

    def extract_metadata(self, query: str) -> Dict[str, Any]:
        """Extract metadata tags (services, levels, components, ...) from *query*."""
        metadata: Dict[str, Any] = {
            "services": [],
            "error_levels": [],
            "components": [],
            "fault_types": [],
            "severity_levels": [],
            "time_range": None,
        }

        query_lower = query.lower()
        for field, mapping, _weight in self._category_specs():
            for label, keywords in mapping.items():
                if self._matches(query_lower, keywords):
                    metadata[field].append(label)

        # Time range: first matching pattern wins (most specific first).
        time_patterns = {
            "最近1小时": r"最近.*?小时|1小时|一小时",
            "今天": r"今天|今日",
            "本周": r"本周|这周|7天",
            "最近": r"最近|近期",
        }
        for time_range, pattern in time_patterns.items():
            if re.search(pattern, query):
                metadata["time_range"] = time_range
                break

        return metadata

    def filter_by_metadata(
        self, results: List[RetrievalResult], metadata: Dict[str, Any]
    ) -> List[RetrievalResult]:
        """Keep only results whose content matches the extracted metadata.

        Each matched label adds its category weight to the result score;
        results with no match are dropped.  When *metadata* carries no tags
        the input list is returned unchanged.
        """
        specs = self._category_specs()
        if not any(metadata[field] for field, _mapping, _weight in specs):
            return results

        filtered_results: List[RetrievalResult] = []
        for result in results:
            content_lower = result.content.lower()
            score = 0.0
            for field, mapping, weight in specs:
                for label in metadata[field]:
                    if self._matches(content_lower, mapping.get(label, [])):
                        score += weight

            if score > 0:
                # Replace the score with the metadata-match score; the hybrid
                # merger re-weights this channel afterwards.
                result.score = score
                result.metadata = metadata
                filtered_results.append(result)

        return filtered_results


class HybridRetriever:
    """Hybrid retriever combining semantic, BM25 and metadata-filtered search.

    Each channel returns RAW scores; ``_merge_results`` applies the channel
    weights exactly once when combining.  (Previously each channel pre-applied
    its weight and the merge multiplied by the same weight again, effectively
    squaring every weight — semantic 0.36, bm25 0.09, metadata 0.01.)
    """

    def __init__(self, vector_system, llm_client, data_integrator=None):
        self.vector_system = vector_system
        self.llm_client = llm_client
        self.data_integrator = data_integrator

        # Sub-retrievers.
        self.bm25_retriever = BM25Retriever()
        self.metadata_filter = MetadataFilter(data_integrator)

        # Channel weights, applied once in _merge_results.
        self.weights = {"semantic": 0.6, "bm25": 0.3, "metadata": 0.1}

        # Build the BM25 index if structured data is available.
        self._train_bm25_model()

    def _train_bm25_model(self):
        """Fit the BM25 index from the data integrator's structured data (best effort)."""
        try:
            if self.data_integrator and self.data_integrator.structured_data:
                documents = [
                    item.content for item in self.data_integrator.structured_data
                ]
                self.bm25_retriever.fit(documents)
                logger.debug(f"BM25模型训练完成，共 {len(documents)} 个文档")
        except Exception as e:
            # Hybrid retrieval degrades gracefully without the BM25 channel.
            logger.warning(f"BM25模型训练失败: {e}")

    def retrieve(
        self, query: str, top_k: int = 10, rag_logger=None
    ) -> List[RetrievalResult]:
        """Hybrid retrieval entry point.

        Runs all three channels (each over-fetching ``top_k * 2`` candidates),
        merges them with the configured weights, and returns the top ``top_k``
        results.  Falls back to plain semantic retrieval on any failure.
        """
        try:
            # 1. Semantic retrieval.
            semantic_results = self._semantic_retrieve(query, top_k * 2)
            if rag_logger:
                semantic_scores = [r.score for r in semantic_results]
                rag_logger.log_semantic_retrieval(
                    len(semantic_results), semantic_scores
                )

            # 2. BM25 keyword retrieval.
            bm25_results = self._bm25_retrieve(query, top_k * 2)
            if rag_logger:
                bm25_scores = [r.score for r in bm25_results]
                rag_logger.log_bm25_retrieval(len(bm25_results), bm25_scores)

            # 3. Metadata-filtered retrieval.
            metadata_results = self._metadata_filter(query, top_k * 2)
            if rag_logger:
                metadata_info = self.metadata_filter.extract_metadata(query)
                rag_logger.log_metadata_filter(len(metadata_results), metadata_info)

            # 4. Weighted merge of the three channels.
            merged_results = self._merge_results(
                semantic_results, bm25_results, metadata_results
            )

            # 5. Final ranking and truncation.
            final_results = self._deduplicate_and_sort(merged_results, top_k)

            if rag_logger:
                rag_logger.log_merge_results(len(final_results), self.weights)

            return final_results

        except Exception as e:
            logger.error(f"混合检索失败: {e}")
            if rag_logger:
                rag_logger.log_error("混合检索", e)
            # Fallback: semantic retrieval only.
            return self._semantic_retrieve(query, top_k)

    def _semantic_retrieve(self, query: str, top_k: int) -> List[RetrievalResult]:
        """Semantic channel: raw vector-DB scores (weighted later, in the merge)."""
        try:
            results = self.vector_system.retrieve_logs(query, top_k=top_k)
            return [
                RetrievalResult(
                    content=item["content"],
                    score=item["score"],
                    source="vector_db",
                    retrieval_type="semantic",
                )
                for item in results
            ]
        except Exception as e:
            logger.error(f"语义检索失败: {e}")
            return []

    def _bm25_retrieve(self, query: str, top_k: int) -> List[RetrievalResult]:
        """BM25 channel: raw BM25 scores (weighted later, in the merge)."""
        try:
            # The BM25 index is built in _train_bm25_model from the
            # integrator's structured data; empty index yields no results.
            return self.bm25_retriever.search(query, top_k)
        except Exception as e:
            logger.error(f"BM25检索失败: {e}")
            return []

    def _metadata_filter(self, query: str, top_k: int) -> List[RetrievalResult]:
        """Metadata channel: vector-DB candidates filtered by query metadata.

        Scores are the raw metadata-match scores (weighted later, in the merge).
        """
        try:
            metadata = self.metadata_filter.extract_metadata(query)

            # Base candidates come from the vector store.
            base_results = self.vector_system.retrieve_logs(query, top_k=top_k)
            candidates = [
                RetrievalResult(
                    content=item["content"],
                    score=item["score"],
                    source="metadata_filter",
                    retrieval_type="metadata",
                )
                for item in base_results
            ]

            return self.metadata_filter.filter_by_metadata(candidates, metadata)
        except Exception as e:
            logger.error(f"元数据过滤失败: {e}")
            return []

    @staticmethod
    def _accumulate(
        content_scores: Dict[str, Dict[str, Any]],
        results: List[RetrievalResult],
        score_field: str,
        type_name: str,
    ) -> None:
        """Fold one channel's results into the per-content score table.

        Entries are keyed by MD5 of the content so the same log line found by
        several channels accumulates into a single record.
        """
        for result in results:
            key = hashlib.md5(result.content.encode()).hexdigest()
            entry = content_scores.get(key)
            if entry is None:
                entry = content_scores[key] = {
                    "content": result.content,
                    "semantic_score": 0,
                    "bm25_score": 0,
                    "metadata_score": 0,
                    # source/metadata come from the first channel that saw it.
                    "source": result.source,
                    "metadata": result.metadata,
                    "retrieval_types": set(),
                }
            entry[score_field] = result.score
            entry["retrieval_types"].add(type_name)

    def _merge_results(
        self,
        semantic_results: List[RetrievalResult],
        bm25_results: List[RetrievalResult],
        metadata_results: List[RetrievalResult],
    ) -> List[RetrievalResult]:
        """Merge the three channels into weighted hybrid results.

        This is the single place where ``self.weights`` is applied; the
        per-channel methods deliberately return raw scores.
        """
        content_scores: Dict[str, Dict[str, Any]] = {}
        self._accumulate(content_scores, semantic_results, "semantic_score", "semantic")
        self._accumulate(content_scores, bm25_results, "bm25_score", "bm25")
        self._accumulate(content_scores, metadata_results, "metadata_score", "metadata")

        merged_results: List[RetrievalResult] = []
        for entry in content_scores.values():
            final_score = (
                entry["semantic_score"] * self.weights["semantic"]
                + entry["bm25_score"] * self.weights["bm25"]
                + entry["metadata_score"] * self.weights["metadata"]
            )
            # Diversity bonus: content confirmed by more channels ranks higher.
            final_score += len(entry["retrieval_types"]) * 0.1

            merged_results.append(
                RetrievalResult(
                    content=entry["content"],
                    score=final_score,
                    source=entry["source"],
                    metadata=entry["metadata"],
                    retrieval_type="hybrid",
                )
            )

        return merged_results

    def _deduplicate_and_sort(
        self, results: List[RetrievalResult], top_k: int
    ) -> List[RetrievalResult]:
        """Rank merged results by score and truncate to *top_k*.

        Content-level dedup already happened in _merge_results (MD5 keying),
        so only ordering and truncation remain here.
        """
        results.sort(key=lambda x: x.score, reverse=True)
        return results[:top_k]
