from fastapi import APIRouter, UploadFile, File, Form, HTTPException, Depends, Query, BackgroundTasks
from fastapi.responses import JSONResponse
import os
import re
import fitz  # PyMuPDF
from docx import Document
from pptx import Presentation
import markdown
from bs4 import BeautifulSoup
import epub
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
import tempfile
import shutil
from pathlib import Path
from typing import List, Dict, Tuple, Optional, Union
import asyncio
import time
import json
import uuid
import threading
from collections import defaultdict

# 自定义模块导入
from kg_retrieval.async_tasks import TaskManager, submit_task, get_task_status
from kg_retrieval.env_config import MODEL_CONFIG, ES_CONFIG, KNOWLEDGE_BASE_DIR, LOGS_DIR
from kg_retrieval.monitoring import performance_monitor
from kg_retrieval.optimized_embedding import OptimizedEmbedding
from kg_retrieval.optimized_search import OptimizedSearch
from kg_retrieval.optimized_rerank import OptimizedReranker
from kg_retrieval.es_client import get_es_client
from kg_retrieval.result_formatter import ResultFormatter
from kg_retrieval.cache_manager import CacheManager

# 创建共享模型的全局注册表
class ModelRegistry:
    """Process-wide registry holding shared model instances (lazy singleton).

    main.py populates the two slots at startup via set_models(); request
    handlers read them through the get_optimized_* accessors.
    """

    _instance = None

    def __init__(self):
        # Both slots start empty; they are filled later by set_models()
        # or lazily by the accessor functions.
        self.embedding_model = None
        self.rerank_model = None

    @classmethod
    def get_instance(cls):
        """Return the shared registry, creating it on first access."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

# Process-wide singleton registry shared with main.py.
model_registry = ModelRegistry.get_instance()

router_kg = APIRouter()

# Directory where uploaded knowledge-base files are stored.
# parents=True guards against a missing parent path on a fresh deployment,
# where plain mkdir(exist_ok=True) would raise FileNotFoundError.
UPLOAD_DIR = Path(KNOWLEDGE_BASE_DIR)
UPLOAD_DIR.mkdir(parents=True, exist_ok=True)

# Log directory, created with the same robustness guarantee.
logs_dir = Path(LOGS_DIR)
logs_dir.mkdir(parents=True, exist_ok=True)

# Manager for asynchronous background tasks.
task_manager = TaskManager()

# Cache of KnowledgeBase instances keyed by f"{kb_name}_{use_overlap_segments}".
knowledge_bases = {}

def get_knowledge_base(kb_name: str = "default", use_overlap_segments: bool = False):
    """Return the cached KnowledgeBase for (kb_name, overlap flag), creating it on demand.

    On first access the knowledge-base directory is created (if needed) and
    every document already present in it is loaded into the new instance.
    """
    cache_key = f"{kb_name}_{use_overlap_segments}"
    kb = knowledge_bases.get(cache_key)
    if kb is None:
        kb_dir = os.path.join(KNOWLEDGE_BASE_DIR, kb_name)
        os.makedirs(kb_dir, exist_ok=True)
        kb = KnowledgeBase(use_overlap_segments=use_overlap_segments)
        kb.load_documents_from_directory(kb_dir)
        knowledge_bases[cache_key] = kb
    return kb

# Lightweight helper components; constructed eagerly at import time
# (unlike the models, which are registered lazily via ModelRegistry).
result_formatter = ResultFormatter()
cache_mgr = CacheManager()

# Accessors that read shared models from the registry.
def get_optimized_embedder():
    """Return the shared embedding model, lazily creating one if none was preloaded."""
    if model_registry.embedding_model is None:
        print("警告：嵌入模型未初始化，创建新实例")
        model_registry.embedding_model = OptimizedEmbedding()
    return model_registry.embedding_model

def get_optimized_search():
    """Build an OptimizedSearch wired to the shared (possibly preloaded) embedder."""
    return OptimizedSearch(emb_model=get_optimized_embedder())

def get_optimized_rerank():
    """Return the shared reranker, lazily creating one if none was preloaded."""
    if model_registry.rerank_model is None:
        print("警告：重排序模型未初始化，创建新实例")
        model_registry.rerank_model = OptimizedReranker()
    return model_registry.rerank_model

# Exposed for main.py to install models at startup.
def set_models(embedding_model=None, rerank_model=None):
    """Install preloaded models into the shared model registry.

    Called from main.py at application startup so request handlers reuse
    the already-loaded models instead of constructing new instances.

    Uses explicit ``is not None`` checks: a model object whose __bool__ or
    __len__ evaluates falsy would otherwise be silently skipped.
    """
    if embedding_model is not None:
        model_registry.embedding_model = embedding_model
        print("已设置预加载的嵌入模型到模型注册表")

    if rerank_model is not None:
        model_registry.rerank_model = rerank_model
        print("已设置预加载的重排序模型到模型注册表")

# Warm-up hook run once at startup.
def preload_components():
    """Warm up all model components so the first request responds quickly.

    Failures are non-fatal: components fall back to lazy loading on the
    first request that needs them.
    """
    print("开始预加载模型组件...")
    try:
        print("检查嵌入模型...")
        _ = get_optimized_embedder()

        print("检查重排序模型...")
        _ = get_optimized_rerank()

        print("初始化搜索组件...")
        _ = get_optimized_search()

        print("所有组件预加载检查完成！")
    except Exception as e:
        print(f"预加载组件时出错: {e}")
        print("将在首次请求时再加载组件")

@router_kg.get("/")
def index():
    """Landing endpoint identifying the knowledge-base API service."""
    return {"message": "知识库API服务"}

class DocumentLoader:
    """Static loaders that extract plain text from the supported document formats.

    Each loader returns a list of text chunks (one per page/paragraph/slide,
    depending on the format). On any failure the error is printed and an
    empty list is returned instead of raising, so one bad file does not
    abort a batch load.
    """

    @staticmethod
    @performance_monitor.measure(name="load_pdf")
    def load_pdf(file_path: str) -> List[str]:
        """Extract text from a PDF, one list entry per page (via PyMuPDF)."""
        text: List[str] = []
        try:
            with fitz.open(file_path) as doc:
                for page in doc:
                    text.append(page.get_text())
        except Exception as e:
            print(f"Error loading PDF {file_path}: {e}")
        return text

    @staticmethod
    @performance_monitor.measure(name="load_txt")
    def load_txt(file_path: str) -> List[str]:
        """Read a text file as a single chunk, trying UTF-8 first then GBK."""
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                return [f.read()]
        except UnicodeDecodeError:
            # Fall back to GBK for legacy Chinese-encoded files.
            try:
                with open(file_path, 'r', encoding='gbk') as f:
                    return [f.read()]
            except Exception as e:
                print(f"Error loading TXT {file_path}: {e}")
                return []
        except Exception as e:
            print(f"Error loading TXT {file_path}: {e}")
            return []

    @staticmethod
    @performance_monitor.measure(name="load_docx")
    def load_docx(file_path: str) -> List[str]:
        """Extract all non-empty paragraphs from a DOCX file."""
        try:
            doc = Document(file_path)
            return [paragraph.text for paragraph in doc.paragraphs if paragraph.text.strip()]
        except Exception as e:
            print(f"Error loading DOCX {file_path}: {e}")
            return []

    @staticmethod
    @performance_monitor.measure(name="load_pptx")
    def load_pptx(file_path: str) -> List[str]:
        """Extract text from every text-bearing shape on every slide."""
        try:
            prs = Presentation(file_path)
            text = []
            for slide in prs.slides:
                for shape in slide.shapes:
                    if hasattr(shape, "text"):
                        text.append(shape.text)
            return text
        except Exception as e:
            print(f"Error loading PPTX {file_path}: {e}")
            return []

    @staticmethod
    @performance_monitor.measure(name="load_epub")  # added: every other loader is measured; keep metrics consistent
    def load_epub(file_path: str) -> List[str]:
        """Extract text from every document item of an EPUB (requires ebooklib)."""
        try:
            # Imported locally so the rest of the loaders work without ebooklib.
            import ebooklib
            from ebooklib import epub as epub_lib

            text = []
            book = epub_lib.read_epub(file_path)
            for item in book.get_items():
                if item.get_type() == ebooklib.ITEM_DOCUMENT:
                    soup = BeautifulSoup(item.get_content(), 'html.parser')
                    text.append(soup.get_text())
            return text
        except ImportError:
            print(f"Error loading EPUB {file_path}: ebooklib not installed")
            return []
        except Exception as e:
            print(f"Error loading EPUB {file_path}: {e}")
            return []

    @staticmethod
    @performance_monitor.measure(name="load_md")
    def load_md(file_path: str) -> List[str]:
        """Render Markdown to HTML, then strip the tags to plain text."""
        try:
            with open(file_path, 'r', encoding='utf-8') as f:
                md_text = f.read()
                html = markdown.markdown(md_text)
                soup = BeautifulSoup(html, 'html.parser')
                return [soup.get_text()]
        except Exception as e:
            print(f"Error loading MD {file_path}: {e}")
            return []

class KnowledgeBase:
    """In-memory TF-IDF knowledge base.

    Loads documents, segments each page into paragraphs (with optional
    overlapping segments), and retrieves the segments most similar to a
    query via cosine similarity over a TF-IDF matrix.
    """

    def __init__(self, use_overlap_segments: bool = False):
        self.documents = []  # flat list of all paragraph texts across all files
        self.document_info = []  # per-paragraph metadata, parallel to self.documents
        self.vectorizer = TfidfVectorizer()
        self.tfidf_matrix = None  # built lazily by _update_tfidf_model()
        self.use_overlap_segments = use_overlap_segments  # also index overlapping segments?

    def _clean_text(self, text: str) -> str:
        """Normalize text for vectorization while keeping key semantic content."""
        if not text:
            return ""

        # Replace common Chinese punctuation with spaces rather than removing
        # it outright, so word boundaries are preserved for tokenization.
        text = re.sub(r'[，。！？、；：""''（）【】《》]', ' ', text)

        # Strip most ASCII punctuation; '.' and '?' are intentionally kept as
        # they can be meaningful semantic separators.
        text = re.sub(r'[,!;:\"\'\(\)\[\]—…·]', ' ', text)

        # Collapse runs of whitespace into a single space.
        text = re.sub(r'\s+', ' ', text)

        # Collapse repeated characters (emphasis), e.g. "很很很好" -> "很好".
        # NOTE(review): this collapses ANY immediately repeated CJK character,
        # including legitimate reduplication such as "天天" — confirm intent.
        text = re.sub(r'([一-龥])\1+', r'\1', text)

        return text.strip()

    @performance_monitor.measure(name="load_documents_from_directory")
    def load_documents_from_directory(self, directory: str):
        """Recursively load every supported document under *directory*,
        then (re)fit the TF-IDF model once at the end."""
        supported_extensions = {
            '.pdf': DocumentLoader.load_pdf,
            '.txt': DocumentLoader.load_txt,
            '.docx': DocumentLoader.load_docx,
            '.pptx': DocumentLoader.load_pptx,
            '.epub': DocumentLoader.load_epub,
            '.md': DocumentLoader.load_md
        }

        for root, _, files in os.walk(directory):
            for file in files:
                file_path = os.path.join(root, file)
                self._load_single_file(file_path, supported_extensions)

        # Fit the TF-IDF vectorizer over all collected paragraphs.
        self._update_tfidf_model()

    def _load_single_file(self, file_path: str, supported_extensions: Optional[dict] = None) -> bool:
        """Load one file, choosing a segmentation strategy by size/content so
        that small files can still be retrieved precisely.

        Returns True if the file was ingested, False if skipped (unsupported
        extension or over the 30 MB limit). Does NOT refit TF-IDF; callers
        are responsible for calling _update_tfidf_model().
        """
        if supported_extensions is None:
            supported_extensions = {
                '.pdf': DocumentLoader.load_pdf,
                '.txt': DocumentLoader.load_txt,
                '.docx': DocumentLoader.load_docx,
                '.pptx': DocumentLoader.load_pptx,
                '.epub': DocumentLoader.load_epub,
                '.md': DocumentLoader.load_md
            }
        file_ext = os.path.splitext(file_path)[1].lower()
        file_name = os.path.basename(file_path)
        if file_ext not in supported_extensions:
            return False
        if os.path.getsize(file_path) > 30 * 1024 * 1024:
            print(f"文件 {file_path} 超过30MB限制，跳过")
            return False

        loader_func = supported_extensions[file_ext]
        content = loader_func(file_path)
        file_size = os.path.getsize(file_path)
        is_small = file_size < 200 * 1024  # 200KB — small files may get overlap segments

        for page_num, page_content in enumerate(content, 1):
            # Regardless of file size, first split on blank lines.
            paragraphs = re.split(r'\n\s*\n', page_content)

            # A single long paragraph: fall back to line-level splitting.
            if len(paragraphs) == 1 and len(page_content) > 500:
                paragraphs = page_content.split('\n')

            # Still only a few long paragraphs: split at sentence level.
            if len(paragraphs) <= 3 and any(len(p) > 1000 for p in paragraphs):
                sentence_paragraphs = []
                for p in paragraphs:
                    # Split on Chinese and English sentence-ending punctuation.
                    sentences = re.split(r'(?<=[。！？.!?])\s*', p)
                    sentence_paragraphs.extend([s.strip() for s in sentences if s.strip()])
                paragraphs = sentence_paragraphs

            # Merge short fragments (<50 chars) into the preceding paragraph.
            merged_paragraphs = []
            current_para = ""

            for para in paragraphs:
                para = para.strip()
                if not para:
                    continue

                if len(para) < 50 and current_para:
                    current_para += " " + para
                else:
                    if current_para:
                        merged_paragraphs.append(current_para)
                    current_para = para

            if current_para:  # flush the final paragraph
                merged_paragraphs.append(current_para)

            # If merging collapsed too much, fall back to the raw split.
            if len(merged_paragraphs) < len(paragraphs) / 2 and len(paragraphs) > 1:
                merged_paragraphs = [p for p in paragraphs if p.strip()]

            # Optionally add overlapping segments (small files only).
            if self.use_overlap_segments and is_small and len(merged_paragraphs) > 3:
                # Interleave each paragraph with an overlap segment made of
                # it and its predecessor, to improve recall.
                final_paragraphs = []
                overlap_window = 2  # overlap window size

                for i in range(len(merged_paragraphs)):
                    # Original paragraph first...
                    final_paragraphs.append(merged_paragraphs[i])

                    # ...then the overlap with the previous paragraph.
                    if i > 0:
                        overlap = merged_paragraphs[i-1] + " " + merged_paragraphs[i]
                        final_paragraphs.append(overlap)
            else:
                # No overlap segmentation; use the merged paragraphs as-is.
                final_paragraphs = merged_paragraphs

            # Store each paragraph plus the text of the one before it.
            previous_paragraph = ""  # text of the previously stored paragraph

            for para_num, paragraph in enumerate(final_paragraphs, 1):
                if paragraph.strip():
                    current_paragraph = paragraph.strip()
                    self.documents.append(current_paragraph)
                    is_overlap = False
                    if self.use_overlap_segments:
                        # NOTE(review): overlap segments are INTERLEAVED with the
                        # originals above (indices 2, 4, ... of final_paragraphs),
                        # so this trailing-position test cannot identify them and
                        # appears to mis-flag segments — confirm intended logic.
                        is_overlap = para_num > len(merged_paragraphs)

                    # Overlap segments already contain their predecessor's text,
                    # so they do not record a separate previous paragraph.
                    prev_paragraph = "" if is_overlap else previous_paragraph

                    self.document_info.append({
                        'file_name': file_name,
                        'file_path': file_path,
                        'page_num': page_num,
                        'para_num': para_num,
                        'is_overlap': is_overlap,
                        'previous_paragraph': prev_paragraph  # context for full_content
                    })

                    # Remember this paragraph as context for the next one.
                    previous_paragraph = current_paragraph

        return True

    def _update_tfidf_model(self):
        """(Re)fit the TF-IDF model over all stored documents, preferring
        jieba tokenization for Chinese text when available."""
        if self.documents:
            try:
                import jieba
                # lowercase=False: jieba handles Chinese; avoid mangling terms.
                self.vectorizer = TfidfVectorizer(tokenizer=jieba.lcut, lowercase=False)
            except ImportError:
                self.vectorizer = TfidfVectorizer()
            # Strip punctuation before vectorizing.
            docs = [self._clean_text(doc) for doc in self.documents]
            self.tfidf_matrix = self.vectorizer.fit_transform(docs)

    @performance_monitor.measure(name="load_file")
    def load_file(self, file_path: str) -> bool:
        """Ingest a single file and refit the TF-IDF model on success."""
        success = self._load_single_file(file_path)
        if success:
            self._update_tfidf_model()
        return success

    @performance_monitor.measure(name="search")
    def search(self, query: str, threshold: float = 0.2, top_n: int = 5, debug: bool = False) -> List[Dict]:
        """Return up to *top_n* paragraphs most similar to *query*.

        Pipeline: clean + tokenize the query, rank all paragraphs by cosine
        similarity, auto-lower the threshold if too few hits, pull in
        neighbouring paragraphs, dedupe, boost exact substring matches, and
        finally fall back to the closest below-threshold hits if nothing
        qualifies. Each result dict carries content, similarity, source
        location and a human-readable match_type.
        """
        if not self.documents or self.tfidf_matrix is None:
            return []

        # Normalize the query the same way documents were normalized.
        cleaned_query = self._clean_text(query)

        # Tokenize with jieba so the query matches the vectorizer's vocabulary.
        try:
            import jieba
            # Space-join tokens to mirror the TF-IDF tokenization.
            query_words = " ".join(jieba.lcut(cleaned_query))
            query_to_use = query_words
        except ImportError:
            query_to_use = cleaned_query

        # Vectorize the query; bail out gracefully on failure.
        try:
            query_vec = self.vectorizer.transform([query_to_use])
        except Exception as e:
            print(f"向量化查询失败: {e}")
            return []

        # Cosine similarity of the query against every stored paragraph.
        similarities = cosine_similarity(query_vec, self.tfidf_matrix).flatten()

        # Rank all paragraphs by similarity (descending).
        all_sim = [(idx, float(sim)) for idx, sim in enumerate(similarities)]
        all_sim_sorted = sorted(all_sim, key=lambda x: x[1], reverse=True)

        if debug:
            print(f"查询：{query}")
            print(f"分词后查询：{query_to_use}")
            print(f"文档总数：{len(self.documents)}")
            print(f"最高相似度：{all_sim_sorted[0][1] if all_sim_sorted else 0}")
            print(f"阈值：{threshold}")

        # Candidates above threshold (over-fetch 2x for later filtering).
        top_indices = [idx for idx, sim in all_sim_sorted[:top_n*2] if sim > threshold]

        # Too few hits: auto-lower the threshold and retry.
        if len(top_indices) < 2 and threshold > 0.1 and all_sim_sorted:
            lowered_threshold = max(0.1, all_sim_sorted[0][1] * 0.8)  # 80% of the best score, floor 0.1
            top_indices = [idx for idx, sim in all_sim_sorted[:top_n*2] if sim > lowered_threshold]
            if debug and len(top_indices) > 0:
                print(f"自动降低阈值至 {lowered_threshold}，找到 {len(top_indices)} 个结果")

        # Also retrieve immediate neighbours of each strong hit.
        adjacent_indices = set()
        for idx in top_indices.copy():
            # Metadata of the matched paragraph.
            current_info = self.document_info[idx]

            # Same file, same page, para_num differs by exactly 1.
            # NOTE(review): this is an O(hits * corpus) linear scan; fine for
            # small corpora, consider an index for large ones.
            for i, info in enumerate(self.document_info):
                if (info['file_path'] == current_info['file_path'] and 
                    info['page_num'] == current_info['page_num'] and
                    abs(info['para_num'] - current_info['para_num']) == 1):
                    adjacent_indices.add(i)

        # Append neighbours not already among the candidates.
        for idx in adjacent_indices:
            if idx not in top_indices:
                top_indices.append(idx)

        # Query tokens for filename matching below.
        query_terms = set(query.lower().split())

        # Final result set.
        results = []

        # Content hashes used for deduplication.
        content_set = set()

        # Build a result dict for each candidate paragraph.
        for idx in top_indices:
            # Paragraph text for this candidate.
            current_content = self.documents[idx]

            # Skip duplicate content.
            content_hash = hash(current_content)
            if content_hash in content_set:
                continue

            # Register this content as seen.
            content_set.add(content_hash)

            # Preceding paragraph, used to build full_content context.
            previous_paragraph = self.document_info[idx].get('previous_paragraph', '')

            # Overlap and adjacent paragraphs already carry their own context.
            is_overlap = self.document_info[idx].get('is_overlap', False)
            is_adjacent = idx in adjacent_indices

            # Prepend the previous paragraph only for plain content matches.
            full_content = current_content
            if previous_paragraph and not is_overlap and not is_adjacent:
                full_content = previous_paragraph + "\n\n" + current_content

            # Classify the match for display purposes.
            match_type = "内容匹配"
            if is_adjacent:
                match_type = "相邻段落"
            elif is_overlap:
                match_type = "重叠内容"

            # A query token appearing in the filename overrides the type.
            file_name = self.document_info[idx]['file_name'].lower()
            if any(term.lower() in file_name for term in query.lower().split() if len(term) > 1):
                match_type = "文件名匹配"

            # Assemble the result record.
            result = {
                'content': current_content,
                'full_content': full_content,  # content with preceding context
                'similarity': float(similarities[idx]),
                'file_name': self.document_info[idx]['file_name'],
                'file_path': self.document_info[idx]['file_path'],
                'page_num': self.document_info[idx]['page_num'],
                'para_num': self.document_info[idx]['para_num'],
                'is_overlap': is_overlap,
                'has_previous': bool(previous_paragraph),  # whether context exists
                'query_terms': list(query_terms),
                'match_type': match_type  # human-readable match category
            }

            # Drop overlap segments that fall below the threshold.
            # NOTE(review): the content hash was already added to content_set
            # above, so an identical non-overlap duplicate later would also be
            # skipped — confirm this is acceptable.
            if result['is_overlap'] and result['similarity'] < threshold:
                continue

            # Flag neighbours pulled in via adjacency.
            result['is_adjacent'] = is_adjacent

            # Exact substring match: boost similarity by 30% (capped at 1.0).
            content_lower = result['content'].lower()
            if query.lower() in content_lower:
                result['contains_exact_query'] = True
                result['similarity'] = min(1.0, result['similarity'] * 1.3)
                result['match_type'] = "精确匹配"  # upgrade the match category

            # Accept the result.
            results.append(result)

        # Order by (possibly boosted) similarity.
        results = sorted(results, key=lambda x: x['similarity'], reverse=True)

        # Trim to the requested count.
        final_results = results[:top_n]

        # Nothing qualified: return the closest hits anyway, flagged as such.
        if not final_results and all_sim_sorted:
            if debug:
                closest = all_sim_sorted[:3]
                print(f"未找到超过阈值的结果，返回最接近的结果:")
                for idx, sim in closest:
                    print(f"  {idx}: {sim} - {self.documents[idx][:50]}...")

            # Reuse the dedupe set for the fallback results.
            content_set.clear()
            for idx, sim in all_sim_sorted[:min(3, top_n)]:
                # Skip duplicate content.
                content_hash = hash(self.documents[idx])
                if content_hash in content_set:
                    continue
                content_set.add(content_hash)

                # Preceding paragraph for context.
                previous_paragraph = self.document_info[idx].get('previous_paragraph', '')

                # Prepend context when available.
                full_content = self.documents[idx]
                if previous_paragraph:
                    full_content = previous_paragraph + "\n\n" + self.documents[idx]

                # Flag these as low-relevance matches.
                match_type = "低相关性匹配"

                # Filename matches are labelled separately even here.
                file_name = self.document_info[idx]['file_name'].lower()
                if any(term.lower() in file_name for term in query.lower().split() if len(term) > 1):
                    match_type = "文件名匹配(低相关)"

                result = {
                    'content': self.documents[idx],
                    'full_content': full_content,
                    'similarity': float(sim),
                    'file_name': self.document_info[idx]['file_name'],
                    'file_path': self.document_info[idx]['file_path'],
                    'page_num': self.document_info[idx]['page_num'],
                    'para_num': self.document_info[idx]['para_num'],
                    'has_previous': bool(previous_paragraph),
                    'query_terms': list(query_terms),
                    'below_threshold': True,  # marks a sub-threshold fallback
                    'match_type': match_type  # human-readable match category
                }
                final_results.append(result)

        return final_results

# API endpoint: upload one file into a knowledge base.
@router_kg.post("/load_file")
@performance_monitor.measure(name="load_file_api")
async def load_file(
    file: UploadFile = File(...),
    kb_name: str = Form("default"),
    use_overlap_segments: bool = Form(False),
    background_tasks: BackgroundTasks = None
):
    """Upload a single file into a knowledge base.

    - file: the file to upload
    - kb_name: target knowledge base name (default "default")
    - use_overlap_segments: whether to use overlapping segmentation

    Raises HTTP 400 for unsupported extensions and HTTP 500 when the file
    cannot be parsed into the knowledge base (the stored copy is removed).
    """
    start_time = time.time()

    # Validate the file extension against the supported set.
    supported_extensions = ['.pdf', '.txt', '.docx', '.pptx', '.epub', '.md']
    # Keep only the base name of the client-supplied filename so a crafted
    # name like "../../evil.txt" cannot escape the knowledge-base directory.
    safe_name = os.path.basename(file.filename or "")
    file_extension = os.path.splitext(safe_name)[1].lower()

    if file_extension not in supported_extensions:
        raise HTTPException(
            status_code=400, 
            detail=f"不支持的文件类型。支持的类型: {', '.join(supported_extensions)}"
        )

    # NOTE(review): kb_name is also client-supplied and used in a path —
    # consider validating it against path separators as well.
    kb_dir = os.path.join(KNOWLEDGE_BASE_DIR, kb_name)
    os.makedirs(kb_dir, exist_ok=True)
    file_path = os.path.join(kb_dir, safe_name)
    with open(file_path, "wb") as buffer:
        shutil.copyfileobj(file.file, buffer)
    kb = get_knowledge_base(kb_name, use_overlap_segments)
    success = kb.load_file(file_path)

    if not success:
        # Remove the stored copy on failure. (Bug fix: file_path is a str,
        # so the original file_path.exists() raised AttributeError.)
        if os.path.exists(file_path):
            os.remove(file_path)
        raise HTTPException(status_code=500, detail="文件处理失败")

    duration_ms = round((time.time() - start_time) * 1000, 2)

    return JSONResponse(
        content={
            "status": "success", 
            "message": f"文件 '{file.filename}' 已成功加载到知识库",
            "file_path": str(file_path),
            "overlap_segments": use_overlap_segments,
            "processing_time_ms": duration_ms
        }
    )

# API endpoint: poll the status of an async task.
@router_kg.get("/task/{task_id}")
async def check_task_status(task_id: str):
    """Return the processing status of the task identified by *task_id*."""
    status = get_task_status(task_id)
    return status

# API endpoint: search a knowledge base.
@router_kg.get("/search")
@performance_monitor.measure(name="search_api")
async def search(
    query: str,
    kb_name: str = Query("default"),
    threshold: float = Query(0.2, ge=0.0, le=1.0),
    top_n: int = Query(5, ge=1, le=20),
    use_cache: bool = Query(True),
    search_mode: str = Query("local", enum=["local", "optimized"]),
    use_overlap_segments: bool = Query(False),
    full_content_merge_neigh: bool = Query(True),
    company_id: Optional[str] = None
):
    """Search a knowledge base for content similar to *query*.

    - query: the search text
    - kb_name: knowledge base name (default "default")
    - threshold: minimum similarity for a hit to be returned
    - top_n: maximum number of results
    - use_cache: whether to read/write the response cache
    - search_mode: "local" = in-process TF-IDF KB, "optimized" = ES + rerank pipeline
    - use_overlap_segments: whether the KB uses overlapping segmentation
    - full_content_merge_neigh: merge adjacent paragraphs into the main
      paragraph's full_content (default True)
    - company_id: tenant filter, only used in "optimized" mode

    Returns a JSON dict with results, timing breakdown, and a match_type
    summary; errors are returned as a JSON error payload, not raised.
    """
    start_time = time.time()
    timing = {"total": 0, "cache_check": 0, "search": 0, "rerank": 0, "format": 0}
    match_type = "未知"  # initial match type, refined as the pipeline runs

    # Fast path: serve from cache when enabled and present.
    cache_start = time.time()
    if use_cache:
        cache_key = cache_mgr.get_cache_key(query, company_id if search_mode == "optimized" else None, use_overlap_segments, full_content_merge_neigh)
        cached_result = cache_mgr.get(cache_key)
        if cached_result:
            return {
                "status": "success",
                "source": "cache",
                "query": query,
                "results_count": len(cached_result.get("results", [])),
                "results": cached_result.get("results", []),
                "processing_time_ms": 0,
                "match_type": cached_result.get("match_type", "缓存结果")
            }
    timing["cache_check"] = round((time.time() - cache_start) * 1000, 2)

    try:
        if search_mode == "optimized":
            # Optimized pipeline: vector search -> rerank -> format.
            match_type = "优化检索"
            search_start = time.time()
            search_result = await get_optimized_search().search(
                query=query,
                company_id=company_id,
                limit=20  # over-fetch so the reranker has candidates to work with
            )
            timing["search"] = round((time.time() - search_start) * 1000, 2)

            # Rerank the candidates against the query.
            rerank_start = time.time()
            reranked_results = get_optimized_rerank().rerank(
                query=query,
                results=search_result.get("results", []),
                top_n=20
            )
            timing["rerank"] = round((time.time() - rerank_start) * 1000, 2)

            # Format the reranked results, passing the query for highlighting.
            format_start = time.time()
            results = result_formatter.format_search_results(
                reranked_results=reranked_results,
                limit=top_n,
                query=query  # forwarded so the formatter can mark query terms
            )
            timing["format"] = round((time.time() - format_start) * 1000, 2)
        else:
            # Local pipeline: in-process TF-IDF knowledge base.
            match_type = "本地知识库"
            search_start = time.time()
            kb = get_knowledge_base(kb_name, use_overlap_segments)
            local_results = kb.search(query, threshold=threshold, top_n=top_n*2)  # over-fetch for adjacent-merge processing
            timing["search"] = round((time.time() - search_start) * 1000, 2)

            # Attach the query and merge setting so the formatter can use them.
            for result in local_results:
                result['query'] = query
                result['full_content_merge_neigh'] = full_content_merge_neigh  # forward merge preference

            # No hits at a high threshold: retry once at threshold 0.1.
            lower_threshold_results = []
            if not local_results and threshold > 0.1:
                match_type = "降低阈值检索"
                lower_threshold_results = kb.search(query, threshold=0.1, top_n=top_n*2)
                for result in lower_threshold_results:
                    result['query'] = query
                    result['threshold_lowered'] = True
                    result['full_content_merge_neigh'] = full_content_merge_neigh  # forward merge preference

            # Choose whichever result set is non-empty for formatting.
            results_to_format = local_results if local_results else lower_threshold_results

            format_start = time.time()
            if full_content_merge_neigh and results_to_format:
                # Pre-process: fold adjacent paragraphs into their main hit.
                processed_results = []
                adjacent_indices = set()

                # Pass 1: index the results that are adjacency hits.
                for i, result in enumerate(results_to_format):
                    if result.get('is_adjacent', False):
                        adjacent_indices.add(i)

                # Pass 2: merge adjacent content into the main paragraphs.
                for i, result in enumerate(results_to_format):
                    if i not in adjacent_indices:  # main (non-adjacent) paragraph
                        # Collect this paragraph's adjacent neighbours.
                        adjacent_content = []
                        for j, adj_result in enumerate(results_to_format):
                            if j in adjacent_indices and adj_result.get('is_adjacent', False):
                                # Neighbour = same file, same page, para_num ±1.
                                if (adj_result['file_path'] == result['file_path'] and 
                                    adj_result['page_num'] == result['page_num'] and
                                    abs(adj_result['para_num'] - result['para_num']) == 1):
                                    adjacent_content.append(adj_result['content'])

                        # Merge neighbour text into full_content.
                        # NOTE(review): neighbours are prepended regardless of
                        # whether they precede or follow the paragraph — confirm
                        # ordering is acceptable for display.
                        full_content = result.get('full_content', result['content'])
                        if adjacent_content:
                            # Neighbour content first, then the main paragraph.
                            full_content = "\n\n".join([*adjacent_content, result['content']])

                            # Record the merge on the result.
                            result['full_content'] = full_content
                            result['has_merged_adjacent'] = bool(adjacent_content)

                        processed_results.append(result)

                # Format the merged results.
                results = result_formatter.format_local_results(
                    processed_results,
                    limit=top_n
                )
            else:
                # No adjacent merging: format the raw results.
                results = result_formatter.format_local_results(
                    results_to_format,
                    limit=top_n
                )
            timing["format"] = round((time.time() - format_start) * 1000, 2)

        # Summarize the match types present in the results.
        if results:
            # Collect each result's match type.
            match_types = [r.get("match_type", "未知匹配") for r in results]
            # Report the pipeline label plus the most common per-result type.
            if match_types:
                from collections import Counter
                common_match_type = Counter(match_types).most_common(1)[0][0]
                match_type = f"{match_type} - {common_match_type}"

        response = {
            "status": "success",
            "source": "live",
            "query": query,
            "results_count": len(results),
            "results": results,
            "overlap_segments_used": use_overlap_segments,
            "full_content_merge_neigh": full_content_merge_neigh,
            "match_type": match_type
        }

        # Attach user-facing suggestions when results are empty or degraded.
        if len(results) == 0:
            response["suggestion"] = {
                "message": "未找到任何相关内容，请尝试以下操作：",
                "tips": [
                    "使用不同的关键词搜索",
                    f"降低相似度阈值（当前值：{threshold}）",
                    "检查知识库是否已导入相关文档",
                    "尝试使用更简短、更具体的查询"
                ]
            }
        elif any(r.get('threshold_lowered', False) for r in (lower_threshold_results if 'lower_threshold_results' in locals() else [])):
            # locals() guard: lower_threshold_results only exists on the local path.
            response["suggestion"] = {
                "message": "系统已自动降低相似度阈值以找到相关内容",
                "original_threshold": threshold,
                "adjusted_threshold": 0.1
            }

        # Store the fresh response in the cache.
        if use_cache:
            cache_key = cache_mgr.get_cache_key(query, company_id if search_mode == "optimized" else None, use_overlap_segments, full_content_merge_neigh)
            cache_mgr.set(cache_key, response)

        duration_ms = round((time.time() - start_time) * 1000, 2)
        timing["total"] = duration_ms
        response["processing_time_ms"] = duration_ms
        response["timing"] = timing

        return response

    except Exception as e:
        # Errors are reported in-band as a JSON payload (HTTP 200).
        duration_ms = round((time.time() - start_time) * 1000, 2)
        return {
            "status": "error",
            "message": str(e),
            "query": query,
            "processing_time_ms": duration_ms,
            "timing": timing
        }

# API endpoint: performance statistics.
@router_kg.get("/stats")
async def get_performance_stats():
    """Expose the aggregated metrics collected by the performance monitor."""
    stats = performance_monitor.get_stats()
    return {"stats": stats}

# API endpoint: clear the search cache
@router_kg.post("/clear_cache")
async def clear_cache_api():
    """Flush all cached search results and report how many entries were removed."""
    removed = cache_mgr.clear_cache()
    return {"status": "success", "cleared_items": removed}

@router_kg.get("/list_kb")
async def list_kb():
    """List all knowledge bases (each sub-directory under the KB root is one KB)."""
    kb_root = Path(KNOWLEDGE_BASE_DIR)
    names = [entry.name for entry in kb_root.iterdir() if entry.is_dir()]
    return {"knowledge_bases": names}

# API endpoint: upload multiple files into a knowledge base
@router_kg.post("/load_file_multi")
@performance_monitor.measure(name="load_file_multi_api")
async def load_file_multi(
    files: List[UploadFile] = File(...),
    kb_name: str = Form("default"),
    use_overlap_segments: bool = Form(False),
    full_content_merge_neigh: bool = Form(True),
    background_tasks: BackgroundTasks = None
):
    """
    Upload multiple files into a knowledge base.

    - files: the files to upload
    - kb_name: knowledge base name (default: "default")
    - use_overlap_segments: whether to use overlapping segmentation (default: False)
    - full_content_merge_neigh: whether to merge neighbouring paragraphs into the
      main paragraph's full_content (default: True)
    """
    start_time = time.time()

    # Reject an empty upload set up front.
    if len(files) == 0:
        raise HTTPException(
            status_code=400,
            detail="未提供任何文件"
        )

    # Supported file extensions
    supported_extensions = ['.pdf', '.txt', '.docx', '.pptx', '.epub', '.md']

    # Prepare the knowledge-base directory
    kb_dir = os.path.join(KNOWLEDGE_BASE_DIR, kb_name)
    os.makedirs(kb_dir, exist_ok=True)

    # Get the knowledge-base instance
    kb = get_knowledge_base(kb_name, use_overlap_segments)

    # Process each file
    results = []
    for file in files:
        # SECURITY: the filename is client-controlled. Keep only the base name so
        # a crafted name like "../../etc/passwd" cannot escape the KB directory.
        safe_name = os.path.basename(file.filename or "")
        file_extension = os.path.splitext(safe_name)[1].lower()

        # Reject unsupported (or empty/sanitized-away) filenames
        if not safe_name or file_extension not in supported_extensions:
            results.append({
                "filename": file.filename,
                "status": "error",
                "message": f"不支持的文件类型。支持的类型: {', '.join(supported_extensions)}"
            })
            continue

        try:
            # Persist the upload inside the KB directory
            file_path = os.path.join(kb_dir, safe_name)
            with open(file_path, "wb") as buffer:
                shutil.copyfileobj(file.file, buffer)

            # Index the file into the knowledge base
            success = kb.load_file(file_path)

            if not success:
                # Remove the copy so failed files do not linger in the KB dir
                if os.path.exists(file_path):
                    os.remove(file_path)
                results.append({
                    "filename": file.filename,
                    "status": "error",
                    "message": "文件处理失败"
                })
            else:
                results.append({
                    "filename": file.filename,
                    "status": "success",
                    "message": "文件已成功加载到知识库",
                    "file_path": str(file_path)
                })
        except Exception as e:
            # Record per-file failures without aborting the whole batch
            results.append({
                "filename": file.filename,
                "status": "error",
                "message": f"处理文件时出错: {str(e)}"
            })

    # Total processing time
    duration_ms = round((time.time() - start_time) * 1000, 2)

    # Tally successes and failures
    success_count = sum(1 for result in results if result["status"] == "success")
    error_count = sum(1 for result in results if result["status"] == "error")

    return JSONResponse(
        content={
            "status": "completed", 
            "message": f"处理了 {len(files)} 个文件, {success_count} 成功, {error_count} 失败",
            "processing_time_ms": duration_ms,
            "overlap_segments": use_overlap_segments,
            "full_content_merge_neigh": full_content_merge_neigh,
            "results": results
        }
    )

# API endpoint: load a directory of documents into a knowledge base
@router_kg.post("/load_directory")
@performance_monitor.measure(name="load_directory_api")
async def load_directory(
    directory_path: str = Form(...),
    kb_name: str = Form("default"),
    use_overlap_segments: bool = Form(False),
    recursive: bool = Form(True),
    background_tasks: BackgroundTasks = None
):
    """
    Load every supported file under a directory into a knowledge base.

    - directory_path: directory to import from
    - kb_name: knowledge base name (default: "default")
    - use_overlap_segments: whether to use overlapping segmentation (default: False)
    - recursive: whether to descend into sub-directories (default: True)
    """
    start_time = time.time()

    # The source must be an existing directory
    if not os.path.exists(directory_path) or not os.path.isdir(directory_path):
        raise HTTPException(
            status_code=400,
            detail=f"目录不存在或不是有效的目录: {directory_path}"
        )

    # Supported file extensions
    supported_extensions = ['.pdf', '.txt', '.docx', '.pptx', '.epub', '.md']

    # Prepare the knowledge-base directory
    kb_dir = os.path.join(KNOWLEDGE_BASE_DIR, kb_name)
    os.makedirs(kb_dir, exist_ok=True)

    # Get the knowledge-base instance
    kb = get_knowledge_base(kb_name, use_overlap_segments)

    results = []
    file_count = 0
    error_count = 0

    def ingest(src_dir, rel_prefix=""):
        # Walk a single directory level; recurse into sub-dirs when enabled.
        nonlocal file_count, error_count

        for entry in os.listdir(src_dir):
            entry_path = os.path.join(src_dir, entry)

            if os.path.isdir(entry_path) and recursive:
                # Mirror the sub-directory inside the KB, then descend into it
                sub_rel = os.path.join(rel_prefix, entry) if rel_prefix else entry
                os.makedirs(os.path.join(kb_dir, sub_rel), exist_ok=True)
                ingest(entry_path, sub_rel)
                continue

            if not os.path.isfile(entry_path):
                continue

            # Skip unsupported file types
            ext = os.path.splitext(entry)[1].lower()
            if ext not in supported_extensions:
                results.append({
                    "filename": entry,
                    "path": entry_path,
                    "status": "skipped",
                    "message": f"不支持的文件类型。支持的类型: {', '.join(supported_extensions)}"
                })
                continue

            try:
                # Copy the file into the mirrored KB location
                dest_path = os.path.join(kb_dir, rel_prefix, entry)
                os.makedirs(os.path.dirname(dest_path), exist_ok=True)
                shutil.copy2(entry_path, dest_path)

                # Index into the knowledge base
                loaded = kb.load_file(dest_path)
                file_count += 1

                if loaded:
                    results.append({
                        "filename": entry,
                        "path": entry_path,
                        "status": "success",
                        "message": "文件已成功加载到知识库",
                        "target_path": dest_path
                    })
                else:
                    # Drop the copy so failed files do not linger in the KB dir
                    if os.path.exists(dest_path):
                        os.remove(dest_path)
                    results.append({
                        "filename": entry,
                        "path": entry_path,
                        "status": "error",
                        "message": "文件处理失败"
                    })
                    error_count += 1
            except Exception as e:
                # Record the failure and keep processing the remaining files
                results.append({
                    "filename": entry,
                    "path": entry_path,
                    "status": "error",
                    "message": f"处理文件时出错: {str(e)}"
                })
                error_count += 1

    # Start the import
    ingest(directory_path)

    # Total processing time and tallies
    duration_ms = round((time.time() - start_time) * 1000, 2)
    success_count = file_count - error_count

    return JSONResponse(
        content={
            "status": "completed", 
            "message": f"处理了 {file_count} 个文件, {success_count} 成功, {error_count} 失败",
            "directory": directory_path,
            "processing_time_ms": duration_ms,
            "overlap_segments": use_overlap_segments,
            "recursive": recursive,
            "results": results
        }
    )

def main():
    """Command-line entry point: preload models, load the default knowledge
    base, then run an interactive search loop until the user types 'quit'."""
    # Preload model components
    preload_components()

    # Build a knowledge-base instance
    kb = KnowledgeBase()

    # Resolve and create the default knowledge-base directory
    base_dir = os.environ.get("KNOWLEDGE_BASE_DIR", "./knowledge_base")
    default_kb_dir = os.path.join(base_dir, "default")
    os.makedirs(default_kb_dir, exist_ok=True)

    # Load the documents of the default knowledge base
    print(f"正在加载默认知识库: {default_kb_dir}")
    kb.load_documents_from_directory(default_kb_dir)

    # Simple interactive search REPL
    while True:
        print("\n" + "=" * 50)
        query = input("请输入搜索内容(输入'quit'退出): ").strip()
        if query.lower() == 'quit':
            break
        if not query:
            continue

        hits = kb.search(query)
        if not hits:
            print("没有找到相关结果")
            continue

        print(f"\n找到 {len(hits)} 个相关结果:")
        for idx, hit in enumerate(hits, 1):
            print(f"\n结果 {idx}:")
            print(f"相似度: {hit['similarity']:.4f}")
            print(f"文件: {hit['file_name']} (第 {hit['page_num']} 页, 段落 {hit['para_num']})")
            print(f"路径: {hit['file_path']}")
            print("内容:")
            # Truncate long passages to 500 characters with an ellipsis
            body = hit['content']
            print(body[:500] + ("..." if len(body) > 500 else ""))

    print("程序结束")

# Script entry point: runs the interactive CLI only when executed directly.
# NOTE(review): more API routes are defined below this guard — they are still
# registered on import, the guard only affects direct execution.
if __name__ == "__main__":
    main()

# API endpoints: read and update the embedding-model configuration
@router_kg.get("/embedding_model")
async def get_embedding_model():
    """Report the embedding-model configuration currently in effect."""
    # Prefer the model preloaded in main.py; otherwise fall back to the lazy getter.
    active_model = model_registry.embedding_model or get_optimized_embedder()

    config_view = {
        "model_type": MODEL_CONFIG["EMBEDDING_MODEL_TYPE"],
        "bge_model_path": MODEL_CONFIG["BGE_MODEL_PATH"],
        "bge_use_fp16": MODEL_CONFIG["BGE_MODEL_USE_FP16"],
        "st_model_path": MODEL_CONFIG["ST_MODEL_PATH"],
        "st_use_fp16": MODEL_CONFIG["ST_MODEL_USE_FP16"],
        "hf_endpoint": MODEL_CONFIG["HF_ENDPOINT"],
        "current_model_type": active_model._get_model_info(),
    }
    return config_view

@router_kg.post("/embedding_model")
async def set_embedding_model(
    model_type: str = Form(...),  # "bge" or "sentence_transformer"
    model_path: str = Form(None),  # model path
    use_fp16: bool = Form(True),   # whether to use FP16
):
    """
    Switch the embedding model used by the service.

    - model_type: "bge" or "sentence_transformer"
    - model_path: model path or name; defaults to the configured path for the type
    - use_fp16: enable FP16 acceleration

    The new model is constructed and smoke-tested BEFORE any global state is
    modified, so a failed switch leaves the previous model and configuration
    fully intact (the original updated MODEL_CONFIG and the registry first,
    leaving them inconsistent on failure).
    """
    # Validate the model type
    if model_type not in ["bge", "sentence_transformer"]:
        raise HTTPException(
            status_code=400,
            detail="不支持的模型类型，只支持 'bge' 或 'sentence_transformer'"
        )

    # Fall back to the configured default path for the chosen type
    if model_path is None:
        if model_type == "bge":
            model_path = MODEL_CONFIG["BGE_MODEL_PATH"]
        else:
            model_path = MODEL_CONFIG["ST_MODEL_PATH"]

    try:
        # Build and smoke-test the candidate model before committing it, so the
        # registry/config never end up pointing at a broken model.
        candidate = OptimizedEmbedding(
            model_path=model_path,
            use_fp16=use_fp16,
            model_type=model_type
        )
        test_result = await candidate.batch_embed(["测试嵌入"])

        # Commit only after the smoke test succeeded
        model_registry.embedding_model = candidate
        MODEL_CONFIG["EMBEDDING_MODEL_TYPE"] = model_type

        # Return the updated configuration
        return {
            "status": "success",
            "message": f"成功切换嵌入模型为 {model_type}: {model_path}",
            "model_type": model_type,
            "model_path": model_path,
            "use_fp16": use_fp16,
            "embedding_dimension": len(test_result[0]) if test_result else None,
            "model_info": model_registry.embedding_model._get_model_info()
        }
    except Exception as e:
        raise HTTPException(
            status_code=500,
            detail=f"更新嵌入模型失败: {str(e)}"
        )

# API endpoint: current rerank-model configuration
@router_kg.get("/rerank_model")
async def get_rerank_model():
    """Report the rerank-model configuration currently in effect."""
    # Prefer the model preloaded in main.py; otherwise fall back to the lazy getter.
    active = model_registry.rerank_model or get_optimized_rerank()

    # Collect whatever identifying attributes the model object exposes
    model_info = {}
    for attr in ("model_type", "model_path"):
        if hasattr(active, attr):
            model_info[attr] = getattr(active, attr)

    rerank_type = MODEL_CONFIG["RERANK_MODEL_TYPE"]
    if rerank_type == "flag":
        active_path = MODEL_CONFIG["RERANK_MODEL_PATH_FLAG"]
    else:
        active_path = MODEL_CONFIG["RERANK_MODEL_PATH_ST"]

    return {
        "status": "success",
        "model_type": rerank_type,
        "models": {
            "flag": {
                "path": MODEL_CONFIG["RERANK_MODEL_PATH_FLAG"],
                "use_fp16": MODEL_CONFIG["RERANK_MODEL_USE_FP16"]
            },
            "sentence_transformer": {
                "path": MODEL_CONFIG["RERANK_MODEL_PATH_ST"],
                "use_fp16": MODEL_CONFIG["ST_RERANK_MODEL_USE_FP16"]
            }
        },
        "current_model_type": rerank_type,
        "active_model_path": active_path,
        "model_info": model_info
    }

# API endpoint: switch the rerank model
@router_kg.post("/rerank_model")
async def set_rerank_model(
    model_type: str = Form(...),  # "flag" or "sentence_transformer"
    model_path: str = Form(None),  # model path; None means use the default
    use_fp16: bool = Form(True),   # whether to use FP16 precision
):
    """
    Switch the rerank model used by the service.

    - model_type: "flag" (FlagEmbedding) or "sentence_transformer" (SentenceTransformer)
    - model_path: model path; falls back to the configured default when None
    - use_fp16: enable FP16 precision

    The new model is constructed BEFORE any environment variable or global
    config is modified, so a failed switch leaves the previous state intact
    (the original mutated os.environ and MODEL_CONFIG first, leaving them
    inconsistent on failure).
    """
    # Validate the model type
    if model_type not in ["flag", "sentence_transformer"]:
        return JSONResponse(
            status_code=400,
            content={
                "status": "error",
                "message": "不支持的模型类型，必须是 'flag' 或 'sentence_transformer'"
            }
        )

    try:
        # Build the candidate model first; only commit state if this succeeds
        candidate = OptimizedReranker(
            model_path=model_path,
            use_fp16=use_fp16,
            model_type=model_type
        )
    except Exception as e:
        return JSONResponse(
            status_code=500,
            content={
                "status": "error",
                "message": f"更新重排序模型失败: {str(e)}"
            }
        )

    # Commit the new model, environment variables and MODEL_CONFIG together
    model_registry.rerank_model = candidate
    os.environ["RERANK_MODEL_TYPE"] = model_type
    MODEL_CONFIG["RERANK_MODEL_TYPE"] = model_type

    if model_type == "flag":
        if model_path:
            os.environ["RERANK_MODEL_PATH_FLAG"] = model_path
        os.environ["RERANK_MODEL_USE_FP16"] = str(use_fp16)
        MODEL_CONFIG["RERANK_MODEL_PATH_FLAG"] = model_path or MODEL_CONFIG["RERANK_MODEL_PATH_FLAG"]
        MODEL_CONFIG["RERANK_MODEL_USE_FP16"] = use_fp16
    else:
        if model_path:
            os.environ["RERANK_MODEL_PATH_ST"] = model_path
        os.environ["ST_RERANK_MODEL_USE_FP16"] = str(use_fp16)
        MODEL_CONFIG["RERANK_MODEL_PATH_ST"] = model_path or MODEL_CONFIG["RERANK_MODEL_PATH_ST"]
        MODEL_CONFIG["ST_RERANK_MODEL_USE_FP16"] = use_fp16

    # Return the updated configuration
    return {
        "status": "success",
        "message": "重排序模型配置已更新",
        "model_type": model_type,
        "model_path": model_path or (MODEL_CONFIG["RERANK_MODEL_PATH_FLAG"] if model_type == "flag" else MODEL_CONFIG["RERANK_MODEL_PATH_ST"]),
        "use_fp16": use_fp16
    }
