"""
银河麒麟OS智能运维助手系统 - 搜索模块
天津理工大学  修改于2025年8月12日

本模块负责向量检索和BM25检索功能，为银河麒麟OS智能运维助手提供混合检索支持。
"""

import numpy as np
import json
import math
import re
import os
from typing import List, Dict, Any, Optional, Tuple
from collections import Counter

class BM25Retriever:
    """
    Galaxy Kylin OS intelligent O&M assistant - BM25 retriever.

    Performs keyword-based retrieval over pre-chunked documents using the
    Okapi BM25 ranking function.
    """
    def __init__(self, metadata_path: str):
        """
        Initialize the BM25 retriever.

        Args:
            metadata_path: Path to a JSON file containing the document
                metadata; each entry must have a 'chunk' text field.
        """
        self.k1 = 1.5  # BM25 term-frequency saturation parameter
        self.b = 0.75  # BM25 document-length normalization parameter

        # Load the documents.
        with open(metadata_path, 'r', encoding='utf-8') as f:
            self.documents = json.load(f)

        # Build the token index and corpus statistics.
        self.preprocess_documents()

    def preprocess_documents(self) -> None:
        """Tokenize every document and build the statistics BM25 needs."""
        # Tokenize each document's chunk text.
        self.doc_tokens = [self.tokenize(doc['chunk']) for doc in self.documents]

        # Average document length (0 for an empty corpus; compute_bm25_score
        # never divides by it in that case because search over 0 docs is a no-op).
        self.avg_doc_length = (
            sum(len(tokens) for tokens in self.doc_tokens) / len(self.doc_tokens)
            if self.doc_tokens else 0
        )

        # Document frequency: number of documents each term appears in.
        # Deduplicate per document so a term counts at most once per doc.
        self.doc_freqs = Counter()
        for tokens in self.doc_tokens:
            self.doc_freqs.update(set(tokens))

    def tokenize(self, text: str) -> List[str]:
        """
        Tokenize text into a list of tokens.

        Chinese characters become single-character tokens; runs of ASCII
        letters/digits become word tokens. Single-character non-Chinese
        tokens are dropped.

        Args:
            text: The text to tokenize.

        Returns:
            List of tokens.
        """
        # Strip punctuation, keeping word characters, whitespace and CJK chars.
        text = re.sub(r'[^\w\s\u4e00-\u9fff]', ' ', text)

        # Character-level segmentation for Chinese, word-level for English/digits.
        tokens = []
        current_word = ""

        for char in text:
            if '\u4e00' <= char <= '\u9fff':  # Chinese character
                if current_word:
                    tokens.append(current_word)
                    current_word = ""
                tokens.append(char)
            elif char.isalnum():  # ASCII letter or digit
                current_word += char
            else:  # whitespace or other separator
                if current_word:
                    tokens.append(current_word)
                    current_word = ""

        if current_word:
            tokens.append(current_word)

        # Drop empty tokens and single-character non-Chinese tokens.
        tokens = [token for token in tokens
                  if token.strip() and (len(token) > 1 or '\u4e00' <= token <= '\u9fff')]
        return tokens

    def compute_bm25_score(self, query_tokens: List[str], doc_idx: int) -> float:
        """
        Compute the BM25 score of one document for a query.

        Args:
            query_tokens: Tokenized query.
            doc_idx: Index of the document to score.

        Returns:
            The BM25 score (0.0 for an empty document or no term overlap).
        """
        doc_tokens = self.doc_tokens[doc_idx]
        doc_length = len(doc_tokens)

        if doc_length == 0:
            return 0.0

        # Per-document term frequencies.
        term_freqs = Counter(doc_tokens)

        # Length-normalization denominator term is constant for this document;
        # hoist it out of the per-token loop.
        length_norm = self.k1 * (1 - self.b + self.b * doc_length / self.avg_doc_length)

        n_docs = len(self.documents)
        score = 0.0
        for token in query_tokens:
            if token in self.doc_freqs:
                # Inverse document frequency (with +1 smoothing to keep it positive).
                idf = math.log((n_docs - self.doc_freqs[token] + 0.5) /
                               (self.doc_freqs[token] + 0.5) + 1.0)

                term_freq = term_freqs.get(token, 0)
                if term_freq > 0:
                    score += idf * ((term_freq * (self.k1 + 1)) /
                                    (term_freq + length_norm))

        return score

    def search(self, query: str, limit: int = 3) -> List[Dict[str, Any]]:
        """
        Search documents with BM25.

        Args:
            query: Search query string.
            limit: Maximum number of results to return.

        Returns:
            Documents ordered by relevance, each with a 'bm25_score' field.
        """
        query_tokens = self.tokenize(query)

        if not query_tokens:
            return []

        # Score every document, keeping only those with at least one match.
        scores = [(i, s) for i, s in
                  ((i, self.compute_bm25_score(query_tokens, i))
                   for i in range(len(self.documents)))
                  if s > 0]

        # Sort by score, best first.
        scores.sort(key=lambda x: x[1], reverse=True)

        # Materialize the top `limit` results.
        results = []
        for doc_idx, score in scores[:limit]:
            result = self.documents[doc_idx].copy()
            result['bm25_score'] = score
            results.append(result)

        return results

class HybridRetriever:
    """
    Galaxy Kylin OS intelligent O&M assistant - improved hybrid retriever.

    Combines vector (FAISS) retrieval with BM25 keyword retrieval using
    weighted score fusion, then applies a greedy diversity-aware selection.
    """
    def __init__(self, index_path: str, metadata_path: str):
        """
        Initialize the hybrid retriever.

        Args:
            index_path: Path to the FAISS index file.
            metadata_path: Path to the JSON metadata file (rows parallel to
                the FAISS index entries; each entry needs 'id' and 'chunk').
        """
        import faiss
        self.index_path = index_path
        self.metadata_path = metadata_path
        self.bm25_retriever = BM25Retriever(metadata_path)

        # Load the FAISS index.
        self.index = faiss.read_index(index_path)

        # Load the chunk metadata.
        with open(metadata_path, 'r', encoding='utf-8') as f:
            self.metadata = json.load(f)

        # Fusion and diversity tuning parameters.
        self.vector_weight = 0.6     # weight of the vector-retrieval score
        self.bm25_weight = 0.4       # weight of the BM25 score
        self.diversity_factor = 0.3  # weight of the diversity bonus

    def vector_search(self, query_vector: np.ndarray, limit: int) -> List[Dict[str, Any]]:
        """
        Run a vector search against the FAISS index.

        Args:
            query_vector: Query embedding of shape (1, dim).
            limit: Requested result count (up to twice as many candidates
                are fetched to give downstream fusion more material).

        Returns:
            Metadata dicts enriched with 'vector_score' and 'rank'.
        """
        D, I = self.index.search(query_vector, min(limit * 2, len(self.metadata)))
        results = []
        for i, (idx, score) in enumerate(zip(I[0], D[0])):
            # FAISS pads missing neighbors with index -1; the old check
            # `idx < len(metadata)` let -1 through and resolved it to
            # metadata[-1] (the last document). Skip invalid slots instead.
            if 0 <= idx < len(self.metadata):
                result = self.metadata[idx].copy()
                result['vector_score'] = float(score)
                result['rank'] = i
                results.append(result)
        return results

    def normalize_scores(self, results: List[Dict[str, Any]], score_key: str) -> List[Dict[str, Any]]:
        """
        Min-max normalize a score field to the [0, 1] range (in place).

        Args:
            results: Result list to annotate.
            score_key: Name of the score field to normalize.

        Returns:
            The same list, with 'normalized_<score_key>' added to each entry.
        """
        if not results:
            return results

        scores = [r.get(score_key, 0) for r in results]
        min_score = min(scores)
        max_score = max(scores)

        if max_score - min_score == 0:
            # All scores equal: treat every result as maximally relevant.
            for r in results:
                r[f'normalized_{score_key}'] = 1.0
        else:
            for r in results:
                original_score = r.get(score_key, 0)
                r[f'normalized_{score_key}'] = (original_score - min_score) / (max_score - min_score)

        return results

    def calculate_diversity_score(self, result: Dict[str, Any], selected_results: List[Dict[str, Any]]) -> float:
        """
        Compute a diversity score penalizing similarity to already-selected results.

        Args:
            result: Candidate result under consideration.
            selected_results: Results already selected.

        Returns:
            Diversity score in [0, 1]; higher means more diverse.
        """
        if not selected_results:
            return 1.0

        current_text = result.get('chunk', '')
        current_file = result.get('source_file', '')

        # Similarity to each already-selected result.
        similarities = []
        file_penalties = []

        for selected in selected_results:
            selected_text = selected.get('chunk', '')
            selected_file = selected.get('source_file', '')

            # Simple Jaccard similarity over whitespace-split words.
            current_words = set(current_text.split())
            selected_words = set(selected_text.split())

            if len(current_words | selected_words) > 0:
                text_similarity = len(current_words & selected_words) / len(current_words | selected_words)
            else:
                text_similarity = 0

            similarities.append(text_similarity)

            # Penalize results drawn from the same source file.
            if current_file == selected_file and current_file != '':
                file_penalties.append(0.5)
            else:
                file_penalties.append(0.0)

        avg_similarity = sum(similarities) / len(similarities) if similarities else 0
        avg_file_penalty = sum(file_penalties) / len(file_penalties) if file_penalties else 0

        diversity_score = 1.0 - avg_similarity - avg_file_penalty
        return max(0.0, diversity_score)

    def hybrid_search(self, query: str, query_vector: np.ndarray, limit: int = 3) -> List[Dict[str, Any]]:
        """
        Run the improved hybrid search combining vector and BM25 retrieval.

        Args:
            query: Text query (for BM25).
            query_vector: Query embedding (for vector search), shape (1, dim).
            limit: Maximum number of results to return.

        Returns:
            Fused, diversity-optimized result list.
        """
        # Over-fetch candidates so fusion has more to work with.
        candidate_limit = min(limit * 3, 20)

        # Run both retrievers.
        vector_results = self.vector_search(query_vector, candidate_limit)
        bm25_results = self.bm25_retriever.search(query, candidate_limit)

        # Normalize each retriever's scores to a comparable [0, 1] range.
        vector_results = self.normalize_scores(vector_results, 'vector_score')
        bm25_results = self.normalize_scores(bm25_results, 'bm25_score')

        # Candidate pool merging both result sets, keyed by document id.
        candidates = {}

        # Fold in vector-search results.
        for result in vector_results:
            doc_id = result['id']
            if doc_id not in candidates:
                candidates[doc_id] = result.copy()
                candidates[doc_id]['sources'] = {'vector'}
                candidates[doc_id]['vector_score'] = result.get('normalized_vector_score', 0)
                candidates[doc_id]['bm25_score'] = 0
            else:
                candidates[doc_id]['sources'].add('vector')
                candidates[doc_id]['vector_score'] = result.get('normalized_vector_score', 0)

        # Fold in BM25 results.
        for result in bm25_results:
            doc_id = result['id']
            if doc_id not in candidates:
                candidates[doc_id] = result.copy()
                candidates[doc_id]['sources'] = {'bm25'}
                candidates[doc_id]['vector_score'] = 0
                candidates[doc_id]['bm25_score'] = result.get('normalized_bm25_score', 0)
            else:
                candidates[doc_id]['sources'].add('bm25')
                candidates[doc_id]['bm25_score'] = result.get('normalized_bm25_score', 0)

        # Weighted score fusion.
        for doc_id, candidate in candidates.items():
            fusion_score = (self.vector_weight * candidate['vector_score'] +
                            self.bm25_weight * candidate['bm25_score'])

            # Multi-source bonus: results found by both retrievers get +20%.
            if len(candidate['sources']) > 1:
                fusion_score *= 1.2

            candidate['fusion_score'] = fusion_score

            # Label the provenance of this candidate.
            if 'vector' in candidate['sources'] and 'bm25' in candidate['sources']:
                candidate['source'] = 'hybrid'
            elif 'vector' in candidate['sources']:
                candidate['source'] = 'vector'
            else:
                candidate['source'] = 'bm25'

        # Rank by fused score.
        candidate_list = list(candidates.values())
        candidate_list.sort(key=lambda x: x['fusion_score'], reverse=True)

        # Greedy selection favoring diverse results.
        selected_results = []

        for candidate in candidate_list:
            if len(selected_results) >= limit:
                break

            diversity_score = self.calculate_diversity_score(candidate, selected_results)

            # Final score = fused score + diversity bonus.
            final_score = candidate['fusion_score'] + self.diversity_factor * diversity_score
            candidate['final_score'] = final_score

            # Always take the first couple of results; after that require
            # a minimum diversity threshold.
            if len(selected_results) < 2 or diversity_score > 0.3:
                selected_results.append(candidate)

        # Backfill with the remaining highest-scoring candidates if short.
        if len(selected_results) < limit:
            remaining_candidates = [c for c in candidate_list if c not in selected_results]
            remaining_candidates.sort(key=lambda x: x['fusion_score'], reverse=True)

            for candidate in remaining_candidates:
                if len(selected_results) >= limit:
                    break
                selected_results.append(candidate)

        # Re-rank by the final score (fusion score for backfilled entries).
        selected_results.sort(key=lambda x: x.get('final_score', x['fusion_score']), reverse=True)

        # Strip temporary bookkeeping fields before returning.
        for result in selected_results:
            result.pop('sources', None)
            result.pop('normalized_vector_score', None)
            result.pop('normalized_bm25_score', None)
            result.pop('final_score', None)

        return selected_results[:limit]

    def explain_search(self, query: str, query_vector: np.ndarray, limit: int = 3) -> Dict[str, Any]:
        """
        Run the hybrid search and return detailed diagnostics for debugging.

        Args:
            query: Text query.
            query_vector: Query embedding, shape (1, dim).
            limit: Maximum number of results per retriever.

        Returns:
            Dict with per-retriever results and the fusion parameters used.
        """
        # Run each retrieval path independently for comparison.
        vector_results = self.vector_search(query_vector, limit)
        bm25_results = self.bm25_retriever.search(query, limit)
        hybrid_results = self.hybrid_search(query, query_vector, limit)

        return {
            'query': query,
            'vector_results': vector_results,
            'bm25_results': bm25_results,
            'hybrid_results': hybrid_results,
            'explanation': {
                'vector_weight': self.vector_weight,
                'bm25_weight': self.bm25_weight,
                'diversity_factor': self.diversity_factor,
                'total_documents': len(self.metadata)
            }
        }