from typing import List, Dict, Any
import re
import os

class ResultFormatter:
    """Format search hits (reranked remote results and local knowledge-base
    results) into uniform dictionaries for front-end display.

    All methods are stateless; the class serves as a namespace for the
    related formatting helpers. Highlighting wraps matched text in 【】
    markers; matching is case-insensitive and preserves the original casing
    of the matched text.
    """

    @staticmethod
    def format_search_results(reranked_results: List[Dict[str, Any]], limit: int = 10, query: str = "") -> List[Dict[str, Any]]:
        """Format reranked search results for front-end display.

        Args:
            reranked_results: Elasticsearch-style hits; each item is expected
                to carry an "_source" dict and a "rerank_score" float.
            limit: Maximum number of results to return.
            query: Original query string used for match-type detection and
                highlighting; an empty string disables both.

        Returns:
            Display dicts with rank, file metadata, a truncated summary and
            highlighted matched content.
        """
        formatted_results = []

        for i, result in enumerate(reranked_results[:limit]):
            source = result.get("_source", {})
            title = source.get("title", "")
            summary = source.get("summary_text", "")

            # Decide whether the hit matched on the title or on the body.
            match_type = "未知匹配"
            matched_content = ""

            if query:
                if query.lower() in title.lower():
                    match_type = "标题匹配"
                    matched_content = ResultFormatter._highlight_match(title, query)
                else:
                    match_type = "内容匹配"
                    matched_content = ResultFormatter._highlight_match(summary, query)
                    # Prefer a focused context window around the match when
                    # one can be extracted from the summary.
                    context = ResultFormatter._extract_context(summary, query)
                    if context:
                        matched_content = context

            formatted_results.append({
                "rank": i + 1,
                "file_name": source.get("filename", ""),
                "file_url": source.get("file_url", ""),
                "title": title,
                "summary": ResultFormatter._truncate(summary, 150),
                "relevance_score": round(result.get("rerank_score", 0.0), 4),
                "file_type": source.get("suffix", ""),
                "match_type": match_type,
                "matched_content": matched_content
            })

        return formatted_results

    @staticmethod
    def format_local_results(local_results: List[Dict], limit: int = 10) -> List[Dict[str, Any]]:
        """Format local knowledge-base search results.

        Supports showing the previous paragraph as context, skipping
        duplicated content, and handling merged adjacent paragraphs.

        Args:
            local_results: Hit dicts from the local search pipeline.
                Recognized keys include "file_name", "content",
                "full_content", "query", "page_num", "para_num",
                "similarity", "file_path" and boolean flags such as
                "is_adjacent", "is_overlap", "has_previous", "has_adjacent",
                "contains_exact_query", "below_threshold",
                "full_content_merge_neigh".
            limit: Maximum number of results to return.

        Returns:
            Display dicts (rank, annotated title, summary, matched content,
            page/paragraph numbers, full content, ...).
        """
        formatted_results = []

        # Drop adjacent paragraphs that were already merged into their main
        # paragraph (when the merge feature is enabled). Scan up to limit*2
        # raw hits so filtering still yields enough results.
        results_to_format = []
        for result in local_results[:limit * 2]:
            if result.get('full_content_merge_neigh', True) and result.get('is_adjacent', False) and result.get('has_adjacent', False):
                continue
            results_to_format.append(result)
            if len(results_to_format) >= limit:
                break

        for i, result in enumerate(results_to_format[:limit]):
            file_name = result.get("file_name", "")
            content = result.get("content", "")
            # Full content may already include previous/adjacent paragraphs.
            full_content = result.get("full_content", content)
            query = result.get("query", "")
            page_num = result.get("page_num", 1)
            para_num = result.get("para_num", 1)
            has_previous = result.get("has_previous", False)  # includes the previous paragraph
            has_adjacent = result.get("has_adjacent", False)  # includes adjacent paragraphs

            # Derive the match type from the hit's flags (priority order).
            match_type = "内容匹配"
            if result.get("is_adjacent", False):
                match_type = "相邻段落"
            elif result.get("is_overlap", False):
                match_type = "重叠内容"
            elif result.get("below_threshold", False):
                match_type = "低相关性匹配"

            # File-name matches take precedence over content matches.
            if query and query.lower() in file_name.lower():
                match_type = "文件名匹配"
                highlighted_name = ResultFormatter._highlight_match(file_name, query)
                matched_content = f"文件名: {highlighted_name}"
            else:
                # Extract and highlight the matching context from the content.
                context = ResultFormatter._extract_context(content, query, max_length=300)
                if context:
                    matched_content = context
                else:
                    # No explicit match: fall back to the first 300 chars.
                    matched_content = ResultFormatter._truncate(content, 300)

                # Prefix status tags so the UI can show how this hit relates
                # to its neighbouring paragraphs. Tags stack intentionally.
                if result.get("is_adjacent", False) and not result.get('full_content_merge_neigh', True):
                    matched_content = f"[相邻段落] {matched_content}"
                if result.get("is_overlap", False):
                    matched_content = f"[重叠内容] {matched_content}"
                if result.get("contains_exact_query", False):
                    matched_content = f"[精确匹配] {matched_content}"
                if has_previous and not result.get("is_overlap", False) and not result.get("is_adjacent", False):
                    matched_content = f"[包含上下文] {matched_content}"
                if has_adjacent:
                    matched_content = f"[包含相邻段落] {matched_content}"

            # Build the title suffix: page/paragraph plus (at most) one
            # status tag, checked in the same priority order as above.
            title_suffix = f"(第{page_num}页 段落{para_num})"
            if result.get("is_adjacent", False) and not result.get('full_content_merge_neigh', True):
                title_suffix += " [相邻段落]"
            elif result.get("is_overlap", False):
                title_suffix += " [重叠内容]"
            elif has_previous and not result.get("is_overlap", False) and not result.get("is_adjacent", False):
                title_suffix += " [包含上下文]"
            elif has_adjacent:
                title_suffix += " [包含相邻段落]"

            formatted_result = {
                "rank": i + 1,
                "file_name": file_name,
                "file_url": result.get("file_path", ""),
                "title": f"{file_name} {title_suffix}",
                # Summary is capped at 200 chars for list views.
                "summary": ResultFormatter._truncate(matched_content, 200),
                "relevance_score": round(result.get("similarity", 0.0), 4),
                "file_type": os.path.splitext(file_name)[1][1:].upper() if '.' in file_name else "",
                "match_type": match_type,
                "page_num": page_num,
                "para_num": para_num,
                "matched_content": matched_content,
                "full_content": full_content  # may include previous/adjacent paragraphs
            }

            # Flag results that scored below the similarity threshold.
            if result.get("below_threshold", False):
                formatted_result["below_threshold"] = True

            formatted_results.append(formatted_result)

        return formatted_results

    @staticmethod
    def _truncate(text: str, max_length: int) -> str:
        """Return *text* cut to max_length chars, appending "..." if cut."""
        return text[:max_length] + ("..." if len(text) > max_length else "")

    @staticmethod
    def _highlight_match(text: str, query: str) -> str:
        """Wrap every query term found in *text* with 【】 markers.

        Matching is case-insensitive and preserves the matched text's
        original casing (fixed: the old code substituted the lowercased
        term). Terms are processed longest-first so shorter terms are less
        likely to split the markers of longer ones, consistent with
        _extract_context.
        """
        if not query or not text:
            return text

        highlighted = text
        for term in sorted(query.lower().split(), key=len, reverse=True):
            if len(term) > 1:  # skip single-character terms (too noisy)
                pattern = re.compile(re.escape(term), re.IGNORECASE)
                # Callable replacement keeps the matched casing and avoids
                # re.sub backslash-escape pitfalls in the replacement string.
                highlighted = pattern.sub(lambda m: f"【{m.group(0)}】", highlighted)

        return highlighted

    @staticmethod
    def _extract_context(text: str, query: str, max_length: int = 300) -> str:
        """Extract a context window around the query match (Chinese-aware).

        Tries an exact match of the full query first, then falls back to
        individual terms (jieba-segmented when available). The window start
        is aligned to a sentence boundary where possible, and matched text
        is wrapped in 【】 markers.

        Args:
            text: The text to search in.
            query: The query string.
            max_length: Approximate size of the returned window.

        Returns:
            The highlighted context, or a plain prefix of *text* when there
            is no usable query or no match.
        """
        if not query or not text:
            return ResultFormatter._truncate(text, max_length)

        # Segment the query: prefer jieba for Chinese text, fall back to
        # whitespace splitting when jieba is not installed.
        try:
            import jieba
            query_terms = jieba.lcut(query.lower())
        except ImportError:
            query_terms = query.lower().split()

        # Drop single-character terms: they match too broadly.
        query_terms = [term for term in query_terms if len(term) > 1]

        if not query_terms:
            # No usable terms: return the head of the text.
            return ResultFormatter._truncate(text, max_length)

        text_lower = text.lower()

        # Prefer an exact match of the complete query string.
        full_query_pos = text_lower.find(query.lower())
        if full_query_pos != -1:
            # Window: ~1/4 of max_length before the match, the rest after.
            start = max(0, full_query_pos - max_length // 4)
            end = min(len(text), full_query_pos + len(query) + max_length * 3 // 4)
        else:
            # Locate the first occurrence of each individual term.
            best_matches = []
            for term in query_terms:
                pos = text_lower.find(term)
                if pos != -1:
                    best_matches.append((pos, term))

            if not best_matches:
                # Nothing matched: return the head of the text.
                return ResultFormatter._truncate(text, max_length)

            # Sort matches by position in the text.
            best_matches.sort()

            if len(best_matches) > 1:
                # Several terms matched: look for the densest cluster of
                # match positions and centre the window on it.
                min_span = float('inf')
                best_start = 0

                for i in range(len(best_matches)):
                    for j in range(i + 1, min(i + 3, len(best_matches))):  # only nearby matches
                        span = best_matches[j][0] - best_matches[i][0]
                        if span < min_span and span < max_length // 2:
                            min_span = span
                            best_start = best_matches[i][0]

                if min_span < float('inf'):
                    # Dense cluster found: window spans the cluster.
                    start = max(0, best_start - max_length // 4)
                    end = min(len(text), best_start + min_span + max_length // 2)
                else:
                    # No dense cluster: centre on the first match.
                    best_pos = best_matches[0][0]
                    start = max(0, best_pos - max_length // 4)
                    end = min(len(text), best_pos + max_length * 3 // 4)
            else:
                # Single match: centre the window on it.
                best_pos = best_matches[0][0]
                start = max(0, best_pos - max_length // 4)
                end = min(len(text), best_pos + max_length * 3 // 4)

        # Nudge the start forward to the nearest sentence boundary (within
        # 50 chars) so the snippet does not begin mid-sentence.
        if start > 0:
            for i in range(start, max(0, start - 50), -1):
                if text[i] in "。.!?！？":
                    start = i + 1
                    break

            context = ("..." if start > 0 else "") + text[start:end]
        else:
            context = text[start:end]

        # Signal truncation when the window stops before the end of text.
        if end < len(text):
            context += "..."

        # Highlight: prefer the complete query, otherwise each term,
        # longest-first. Callable replacements preserve the matched text's
        # casing (fixed: the old code substituted the query/term string
        # verbatim) and avoid re.sub escape issues.
        if full_query_pos != -1:
            pattern = re.compile(re.escape(query), re.IGNORECASE)
            highlighted = pattern.sub(lambda m: f"【{m.group(0)}】", context)
        else:
            highlighted = context
            # query_terms was already filtered to len > 1 above.
            for term in sorted(query_terms, key=len, reverse=True):
                pattern = re.compile(re.escape(term), re.IGNORECASE)
                highlighted = pattern.sub(lambda m: f"【{m.group(0)}】", highlighted)

        return highlighted