#!/usr/bin/env python3
"""
基于阿里云百炼的RAG知识库管理
"""

import os
import json
import pandas as pd
import numpy as np
from ast import literal_eval
import dashscope
from dashscope import TextEmbedding, TextReRank
from typing import List, Dict, Any, Optional
from http import HTTPStatus
import logging
from pathlib import Path
from datetime import datetime
import requests

logger = logging.getLogger(__name__)

# 设置DashScope的API Key
import sys
from pathlib import Path
project_root = Path(__file__).parent.parent
sys.path.insert(0, str(project_root))

from shared.config.llm_config import DASHSCOPE_API_KEY, get_embedding_config, get_rag_config

dashscope.api_key = DASHSCOPE_API_KEY

class RAGKnowledgeBase:
    """基于阿里云百炼的RAG知识库"""
    
    def __init__(self, knowledge_base_dir: str):
        """Initialize the knowledge base rooted at the given directory.

        Args:
            knowledge_base_dir: Directory where the embeddings Excel file and
                the JSON/TXT knowledge-base files live; created if missing.
        """
        base_dir = Path(knowledge_base_dir)
        base_dir.mkdir(parents=True, exist_ok=True)
        self.knowledge_base_dir = base_dir
        self.functions_df = None  # lazily populated DataFrame of function embeddings
        self.embeddings_file = base_dir / "function_embeddings.xlsx"

        # Pull retrieval defaults from the shared RAG configuration.
        config = get_rag_config()
        self.rag_config = config
        self.enable_rerank = config['enable_rerank']
        self.default_candidate_k = config['candidate_k']
        self.default_rerank_top_k = config['rerank_top_k']
        self.default_final_top_k = config['final_top_k']
        self.rerank_model = config['rerank']['model']
        
    def generate_embeddings(self, text):
        """Create embedding vector(s) for the given text via DashScope.

        Args:
            text: A single string, or a list of strings to embed in one call.

        Returns:
            A single embedding vector for a string input, a list of vectors
            for a list input, or None when the API call fails.
        """
        try:
            response = TextEmbedding.call(
                model=TextEmbedding.Models.text_embedding_v3,
                input=text
            )
            vectors = [item['embedding'] for item in response.output['embeddings']]
            # List input -> one vector per element; plain string -> single vector.
            return vectors if isinstance(text, list) else vectors[0]
        except Exception as e:
            logger.error(f"生成嵌入向量失败: {e}")
            return None
    
    def cosine_similarity(self, a, b):
        """Compute the cosine similarity between two vectors.

        Args:
            a: First vector (array-like).
            b: Second vector (array-like).

        Returns:
            float: Cosine similarity in [-1, 1]. Returns 0.0 when either
            vector has zero magnitude — the original division would produce
            NaN, which poisons the similarity sort in the caller.
        """
        norm_product = np.linalg.norm(a) * np.linalg.norm(b)
        if norm_product == 0:
            return 0.0
        return float(np.dot(a, b) / norm_product)
    
    def rerank_documents(self, query: str, documents: List[str], top_n: int = 15) -> List[Dict[str, Any]]:
        """Re-rank candidate documents against a query via DashScope TextReRank.

        Args:
            query: Query text to rank the documents against.
            documents: Candidate document texts (API caps a call at 500 docs).
            top_n: Maximum number of ranked results to request.

        Returns:
            A relevance-ordered list of dicts with 'index',
            'relevance_score' and 'document' keys; empty list on any failure.
        """
        try:
            if not documents:
                logger.warning("没有文档需要重排序")
                return []

            # The rerank API accepts at most 500 documents per request.
            if len(documents) > 500:
                logger.warning(f"文档数量 {len(documents)} 超过最大限制500，将截取前500个")
                documents = documents[:500]

            response = TextReRank.call(
                model=self.rerank_model,
                query=query,
                documents=documents,
                top_n=min(top_n, len(documents)),  # never request more than we sent
                return_documents=True
            )

            if response.status_code != HTTPStatus.OK:
                logger.error(f"Rerank调用失败，状态码: {response.status_code}, 错误: {response.code} - {response.message}")
                return []

            ranked = [
                {
                    'index': item['index'],
                    'relevance_score': item['relevance_score'],
                    'document': item['document']['text'],
                }
                for item in response.output['results']
            ]
            logger.info(f"Rerank成功，重新排序了 {len(ranked)} 个文档")
            return ranked

        except Exception as e:
            logger.error(f"Rerank重排序失败: {e}")
            return []
    
    def load_function_knowledge_base(self) -> bool:
        """Load the function knowledge base, generating embeddings incrementally.

        Reads previously saved embeddings from the Excel file when available,
        fetches the current function list (local JSON file first, then the
        MCP Generator HTTP service as a fallback), embeds any functions not
        yet covered, merges old and new records, persists the result, and
        syncs the derived JSON/TXT knowledge-base files.

        Returns:
            bool: True when usable function data ends up loaded (including
            fallback to previously saved embeddings), False otherwise.
        """
        try:
            # First, check whether a previously saved embeddings file exists.
            if self.embeddings_file.exists():
                try:
                    existing_df = pd.read_excel(self.embeddings_file)
                    existing_df["embedding"] = existing_df.embedding.apply(literal_eval).apply(np.array)
                    self.functions_df = existing_df
                    logger.info(f"从已保存文件加载了 {len(existing_df)} 个函数的向量嵌入")
                    
                    # Track which functions already have embeddings (incremental update).
                    existing_functions = set(existing_df['function_name'].tolist())
                except Exception as e:
                    logger.error(f"加载已保存的向量嵌入失败: {e}")
                    existing_df = pd.DataFrame()
                    existing_functions = set()
            else:
                existing_df = pd.DataFrame()
                existing_functions = set()
            
            # Prefer the local knowledge-base file for function metadata to
            # avoid a circular dependency on the MCP Generator service.
            functions = []
            
            # Try reading function metadata from the local knowledge-base JSON file.
            kb_json_file = self.knowledge_base_dir / "function_knowledge_base.json"
            if kb_json_file.exists():
                try:
                    import json
                    with open(kb_json_file, 'r', encoding='utf-8') as f:
                        kb_data = json.load(f)
                    functions = kb_data.get('functions', [])
                    logger.info(f"从本地知识库文件读取到 {len(functions)} 个函数")
                except Exception as e:
                    logger.error(f"读取本地知识库文件失败: {e}")
            
            # Fall back to the MCP Generator HTTP endpoint when the local file is empty.
            if not functions:
                logger.info("本地知识库文件为空，尝试从MCP Generator获取函数列表")
                
                try:
                    response = requests.get("http://localhost:8010/function-list", timeout=10)
                    if response.status_code == 200:
                        function_data = response.json()
                        functions = function_data.get('functions', [])
                        logger.info(f"从MCP Generator获取到 {len(functions)} 个函数")
                    else:
                        logger.warning(f"从MCP Generator获取函数列表失败，状态码: {response.status_code}")
                except requests.exceptions.RequestException as e:
                    logger.warning(f"无法连接到MCP Generator: {e}")
            
            # Bail out, or fall back to saved embeddings, when no function data arrived.
            if not functions:
                if not existing_df.empty:
                    logger.info("无新函数数据，但使用已保存的向量嵌入")
                    return True
                else:
                    logger.warning("无函数数据且无已保存的向量嵌入")
                    return False
            
            # Generate embeddings only for functions not yet embedded (incremental update).
            logger.info(f"检查 {len(functions)} 个函数是否需要生成向量嵌入...")
            
            new_records = []
            
            for func in functions:
                # Normalize and type-check the incoming record.
                if not isinstance(func, dict):
                    logger.warning(f"跳过非字典类型的函数数据: {type(func)}, 值: {func}")
                    continue
                
                # 'parameters' may arrive as a JSON string; parse it into a list.
                if 'parameters' in func and isinstance(func['parameters'], str):
                    try:
                        func['parameters'] = json.loads(func['parameters'])
                    except (json.JSONDecodeError, TypeError) as e:
                        logger.warning(f"无法解析函数参数的 JSON 字符串: {e}")
                        func['parameters'] = []
                
                function_name = func.get('function_name', '')
                
                # Skip records without a name so we never embed invalid entries.
                if not function_name:
                    logger.warning(f"跳过函数名为空的函数记录: {func}")
                    continue
                
                # Skip functions that already have an embedding.
                if function_name in existing_functions:
                    continue
                        
                # Build an embedding-optimized description for this function,
                # emphasizing the MCP call convention and parameter structure.
                description_parts = [
                    f"MCP函数: {function_name}",
                    f"调用方式: call_indicator_function(function_name='{function_name}', parameters={{...}})",
                    f"功能描述: {func.get('description', '')}",
                    f"返回类型: {func.get('return_type', 'dict')}"
                ]
                        
                # Collect parameter documentation, split into required vs optional.
                required_params = []
                optional_params = []
                
                if func.get('parameters'):
                    # 'parameters' must be a list to be processed further.
                    if not isinstance(func['parameters'], list):
                        logger.warning(f"函数 '{function_name}' 的 parameters 不是列表类型，跳过参数处理")
                    else:
                        for param in func['parameters']:
                            # Each parameter entry must be a dict.
                            if not isinstance(param, dict):
                                logger.warning(f"函数 '{function_name}' 的参数项不是字典类型，跳过: {param}")
                                continue
                            
                            if param.get('name') == 'db':  # 'db' is auto-injected; omit it
                                continue
                            
                            param_desc = f"{param.get('name', '')} ({param.get('type', 'Any')})"
                            if param.get('description'):
                                param_desc += f" - {param['description']}"
                            
                            if param.get('required', True):
                                required_params.append(param_desc)
                            else:
                                default_val = param.get('default', 'None')
                                param_desc += f" [默认: {default_val}]"
                                optional_params.append(param_desc)
                
                if required_params:
                    description_parts.append("必需参数:")
                    description_parts.extend([f"- {p}" for p in required_params])
                
                if optional_params:
                    description_parts.append("可选参数:")
                    description_parts.extend([f"- {p}" for p in optional_params])
                        
                # Add a concrete invocation example with placeholder values.
                example_params = {}
                if func.get('parameters') and isinstance(func['parameters'], list):
                    for param in func['parameters']:
                        # Each parameter entry must be a dict.
                        if not isinstance(param, dict):
                            continue
                        
                        if param.get('name') == 'db':
                            continue
                        if param.get('required', True):
                            param_name = param.get('name', '')
                            param_type = param.get('type', '')
                            # Unwrap complex annotations such as Annotated[int, 'desc'].
                            base_type = param_type
                            if 'Annotated[' in param_type:
                                # Extract the base type from inside the Annotated wrapper.
                                import re
                                match = re.search(r'Annotated\[([^,]+)', param_type)
                                if match:
                                    base_type = match.group(1).strip()
                            
                            if 'int' in base_type and 'List' not in base_type:
                                example_params[param_name] = 12345
                            elif 'List[int]' in base_type or 'List[int]' in param_type:
                                example_params[param_name] = [1, 2, 3]
                            elif 'str' in base_type and 'List' not in base_type:
                                example_params[param_name] = f"示例{param_name}"
                            else:
                                example_params[param_name] = f"<{param_name}>"
                
                if example_params:
                    import json
                    example_json = json.dumps(example_params, ensure_ascii=False, indent=2)
                    description_parts.append(f"调用示例:")
                    description_parts.append(f"call_indicator_function(")
                    description_parts.append(f"    function_name='{function_name}',")
                    description_parts.append(f"    parameters={example_json}")
                    description_parts.append(f")")
                
                # Record the source file when known.
                if func.get('source_file'):
                    description_parts.append(f"来源文件: {func.get('source_file')}")
                
                full_description = "\n".join(description_parts)
                
                # Generate a dedicated embedding vector for this function.
                embedding = self.generate_embeddings(full_description)
                if embedding is not None:
                    new_records.append({
                        'function_name': function_name,
                        'description': func.get('description', ''),
                        'full_description': full_description,
                        'parameters': json.dumps(func.get('parameters', []), ensure_ascii=False),
                        'return_type': func.get('return_type', 'dict'),
                        'source_file': func.get('source_file', ''),
                        'created_at': func.get('created_at', ''),
                        'updated_at': func.get('updated_at', ''),
                        'embedding': embedding
                    })
                    logger.info(f"为函数 '{function_name}' 生成了独立的嵌入向量")
            
            # Merge newly embedded records with previously saved ones.
            if new_records:
                new_df = pd.DataFrame(new_records)
                if not existing_df.empty:
                    # Convert existing embeddings back to lists so they can be saved.
                    existing_df_save = existing_df.copy()
                    existing_df_save["embedding"] = existing_df_save.embedding.apply(
                        lambda x: x.tolist() if isinstance(x, np.ndarray) else x
                    )
                    all_df = pd.concat([existing_df_save, new_df], ignore_index=True)
                else:
                    all_df = new_df
                
                # Persist the merged embeddings to the Excel file.
                all_df_save = all_df.copy()
                all_df_save["embedding"] = all_df_save.embedding.apply(
                    lambda x: x.tolist() if isinstance(x, np.ndarray) else x
                )
                all_df_save.to_excel(self.embeddings_file, index=False, engine='openpyxl')
                
                logger.info(f"新增 {len(new_records)} 个函数嵌入，总计 {len(all_df)} 个函数")
            else:
                all_df = existing_df
                logger.info(f"无新增函数，使用现有 {len(all_df)} 个函数")
            
            # Ensure the in-memory embedding column holds numpy arrays.
            if not all_df.empty and 'embedding' in all_df.columns:
                all_df["embedding"] = all_df.embedding.apply(
                    lambda x: np.array(x) if not isinstance(x, np.ndarray) else x
                )
            
            self.functions_df = all_df
            
            # Sync the derived JSON and TXT knowledge-base files.
            self._update_knowledge_base_files()
            
            return True
                    
        except requests.exceptions.RequestException as e:
            logger.error(f"无法连接到MCP Generator服务: {e}")
            # Fall back to previously loaded data when available.
            if not existing_df.empty:
                self.functions_df = existing_df
                # Sync the derived JSON and TXT knowledge-base files.
                self._update_knowledge_base_files()
                logger.info(f"MCP Generator服务不可用，使用已保存的 {len(existing_df)} 个函数向量嵌入")
                return True
            logger.warning("MCP Generator服务不可用且无已保存数据")
            return False
                
        except Exception as e:
            logger.error(f"加载函数知识库失败: {e}")
            # Fall back to previously loaded data when available.
            try:
                if 'existing_df' in locals() and not existing_df.empty:
                    self.functions_df = existing_df
                    # Sync the derived JSON and TXT knowledge-base files.
                    self._update_knowledge_base_files()
                    logger.info(f"异常情况下使用已保存的 {len(existing_df)} 个函数向量嵌入")
                    return True
            except:
                pass
            return False
    
    def search_relevant_functions(self, query: str, top_k: Optional[int] = None, rag_top_k: Optional[int] = None, enable_rerank: Optional[bool] = None) -> List[Dict[str, Any]]:
        """Two-stage retrieval: vector-similarity recall, then optional rerank.

        Args:
            query: Natural-language query describing the desired function.
            top_k: Number of final results; None means the configured final_top_k.
            rag_top_k: Stage-one candidate pool size; None means the
                configured candidate_k.
            enable_rerank: Override for the configured rerank switch.

        Returns:
            Ranked list of function metadata dicts, each carrying a
            'similarity_score' and — when rerank ran — a 'rerank_score'.
            Empty list on any failure.
        """
        # Fall back to configured defaults for any unspecified knob.
        if top_k is None:
            top_k = self.default_final_top_k
        if rag_top_k is None:
            rag_top_k = self.default_candidate_k
        if enable_rerank is None:
            enable_rerank = self.enable_rerank

        # Lazy-load the knowledge base on first use.
        if self.functions_df is None or self.functions_df.empty:
            logger.info("首次使用RAG知识库，正在加载...")
            if not self.load_function_knowledge_base():
                logger.warning("函数知识库加载失败")
                return []

        try:
            # Stage one: vector-similarity recall of a wider candidate pool.
            logger.info(f"第一阶段：向量相似度检索，目标获取 {rag_top_k} 个候选函数")

            query_embedding = self.generate_embeddings(query)
            if query_embedding is None:
                logger.error("生成查询嵌入向量失败")
                return []

            # Score every stored function against the query embedding.
            similarities = []
            for idx, row in self.functions_df.iterrows():
                try:
                    similarity = self.cosine_similarity(query_embedding, row['embedding'])
                    similarities.append((idx, similarity, row))
                except Exception as e:
                    logger.warning(f"计算相似度失败，跳过函数 {row.get('function_name', '')}: {e}")
                    continue

            # Keep the rag_top_k most similar candidates.
            similarities.sort(key=lambda x: x[1], reverse=True)
            top_candidates = similarities[:rag_top_k]

            logger.info(f"向量相似度检索完成，获得 {len(top_candidates)} 个候选函数")

            # Skip stage two when rerank is disabled or there is nothing to reorder.
            if not enable_rerank or len(top_candidates) <= top_k:
                logger.info("使用向量相似度结果（未启用Rerank或候选数量不足）")
                final_candidates = top_candidates[:top_k]
                return self._build_function_results(final_candidates, use_vector_score=True)

            # Stage two: rerank the candidate pool for better precision.
            logger.info(f"第二阶段：Rerank重排序，从 {len(top_candidates)} 个候选中选出最相关的 {top_k} 个")

            candidate_documents = []
            candidate_mapping = {}  # rerank index -> (df index, vector score, row)

            for i, (idx, similarity, row) in enumerate(top_candidates):
                # The full description is the richest text to rerank against.
                candidate_documents.append(row['full_description'])
                candidate_mapping[i] = (idx, similarity, row)

            rerank_results = self.rerank_documents(query, candidate_documents, top_n=top_k)

            if rerank_results:
                logger.info(f"Rerank重排序成功，获得 {len(rerank_results)} 个重排序结果")

                final_results = []
                for rerank_result in rerank_results:
                    original_idx = rerank_result['index']
                    if original_idx in candidate_mapping:
                        idx, vector_similarity, row = candidate_mapping[original_idx]

                        # 'parameters' is stored as a JSON string; tolerate bad or
                        # missing data without aborting the whole search. (Was a
                        # bare `except:` that also swallowed KeyboardInterrupt.)
                        try:
                            parameters = json.loads(row['parameters']) if row['parameters'] else []
                        except (json.JSONDecodeError, TypeError, KeyError):
                            parameters = []

                        final_results.append({
                            'function_name': row['function_name'],
                            'description': row['description'],
                            'full_description': row['full_description'],
                            'parameters': parameters,
                            'return_type': row['return_type'],
                            'similarity_score': float(vector_similarity),  # stage-one vector score
                            'rerank_score': float(rerank_result['relevance_score'])  # stage-two score
                        })

                logger.info(f"两阶段检索完成，最终返回 {len(final_results)} 个函数")
                return final_results
            else:
                # Rerank failed: degrade gracefully to vector-similarity order.
                logger.warning("Rerank重排序失败，降级使用向量相似度结果")
                final_candidates = top_candidates[:top_k]
                return self._build_function_results(final_candidates, use_vector_score=True)

        except Exception as e:
            logger.error(f"搜索相关函数失败: {e}")
            return []
    
    def _build_function_results(self, candidates: List[tuple], use_vector_score: bool = True) -> List[Dict[str, Any]]:
        """Convert (index, similarity, row) candidate tuples into result dicts.

        Args:
            candidates: Tuples of (dataframe index, similarity score, row
                mapping with the function's stored columns).
            use_vector_score: Kept for interface compatibility; the similarity
                score is always reported under 'similarity_score'.

        Returns:
            List of function metadata dicts with parsed parameter lists.
        """
        results = []
        for idx, similarity, row in candidates:
            # 'parameters' is persisted as a JSON string; fall back to an empty
            # list when missing or malformed instead of failing the search.
            # (Was a bare `except:` that also swallowed KeyboardInterrupt.)
            try:
                parameters = json.loads(row['parameters']) if row['parameters'] else []
            except (json.JSONDecodeError, TypeError, KeyError):
                parameters = []

            results.append({
                'function_name': row['function_name'],
                'description': row['description'],
                'full_description': row['full_description'],
                'parameters': parameters,
                'return_type': row['return_type'],
                'similarity_score': float(similarity)
            })

        return results
    
    def get_functions_context(self, query: str, top_k: int = None, rag_top_k: int = None, enable_rerank: bool = None) -> str:
        """Build a prompt-ready text block describing functions relevant to a query.

        Args:
            query: User query used for retrieval.
            top_k: Final result count; None means the configured default.
            rag_top_k: Stage-one candidate pool size; None means the default.
            enable_rerank: Override for the configured rerank switch.

        Returns:
            A formatted multi-line listing of the matched functions, or a
            fixed notice string when nothing matched.
        """
        matches = self.search_relevant_functions(query, top_k, rag_top_k, enable_rerank)

        if not matches:
            return "未找到相关的MCP函数。"

        lines = [
            "=== 可用的MCP函数 ===",
            f"根据您的查询 '{query}'，找到以下相关函数：",
            ""
        ]

        for rank, func in enumerate(matches, 1):
            # Show the vector score, plus the rerank score when present.
            score_info = f"相似度: {func['similarity_score']:.3f}"
            if 'rerank_score' in func:
                score_info += f", Rerank: {func['rerank_score']:.3f}"

            lines.append(f"{rank}. **{func['function_name']}** ({score_info})")
            lines.append(f"   描述: {func['description']}")
            lines.append(f"   返回类型: {func['return_type']}")

            if not func['parameters']:
                lines.append("   参数: 无")
            else:
                lines.append("   参数:")
                for param in func['parameters']:
                    required = "必填" if param.get('required', True) else "可选"
                    param_line = f"     - {param.get('name', '')} ({param.get('type', 'Any')}) [{required}]"
                    if param.get('description'):
                        param_line += f": {param['description']}"
                    lines.append(param_line)

            lines.append("")

        lines.extend([
            "注意：现在只有一个统一的MCP工具 'call_indicator_function'，使用方式：",
            "- function_name: 选择上述函数中的一个",
            "- parameters: 提供该函数所需的参数字典",
            ""
        ])

        return "\n".join(lines)
    
    def refresh_knowledge_base(self) -> bool:
        """刷新知识库"""
        try:
            # 删除现有的知识库文件以强制重新生成
            files_to_clean = [
                self.embeddings_file,
                self.knowledge_base_dir / "function_knowledge_base.json",
                self.knowledge_base_dir / "function_knowledge_base.txt"
            ]
            
            for file_path in files_to_clean:
                if file_path.exists():
                    file_path.unlink()
                    logger.info(f"已删除知识库文件: {file_path.name}")
            
            # 重新加载
            return self.load_function_knowledge_base()
        except Exception as e:
            logger.error(f"刷新知识库失败: {e}")
            return False
    
    def _update_knowledge_base_files(self):
        """Regenerate the JSON and TXT knowledge-base files from self.functions_df.

        Deletes both derived files when no functions remain; otherwise rewrites
        them from the current DataFrame. Failures are logged, never raised.
        """
        try:
            if self.functions_df is None or self.functions_df.empty:
                # No functions left: remove the derived files entirely.
                json_file = self.knowledge_base_dir / "function_knowledge_base.json"
                txt_file = self.knowledge_base_dir / "function_knowledge_base.txt"
                
                for file_path in [json_file, txt_file]:
                    if file_path.exists():
                        file_path.unlink()
                        logger.info(f"已删除空的知识库文件: {file_path.name}")
                return
            
            # Rebuild the function list in both structured and text form.
            function_list = []
            function_list_text_parts = []
            
            for idx, row in self.functions_df.iterrows():
                function_name = row.get('function_name', '')
                # Skip invalid records that carry no function name.
                if not function_name:
                    continue

                function_info = {
                    "function_name": function_name, # unified field name across files
                    "description": row.get('description', ''),
                    "parameters": row.get('parameters', ''),
                    "usage": row.get('usage', ''),
                    "updated_at": row.get('updated_at', '')
                }
                function_list.append(function_info)
                
                # Prefer the full description (it includes the call example) for the text file.
                full_description = row.get('full_description', '')
                if full_description:
                    text_entry = f"""{idx + 1}. 【{function_name}】
{full_description}

"""
                else:
                    # Fall back to a simplified layout when no full description exists.
                    text_entry = f"""函数名称: {function_name}
描述: {function_info['description']}
参数: {function_info['parameters']}
用法: {function_info['usage']}
更新时间: {function_info['updated_at']}

---

"""
                function_list_text_parts.append(text_entry)
            
            # Rewrite the JSON file.
            json_file = self.knowledge_base_dir / "function_knowledge_base.json"
            import json
            with open(json_file, 'w', encoding='utf-8') as f:
                json.dump({
                    "functions": function_list,
                    "count": len(function_list),
                    "updated_at": datetime.now().isoformat()
                }, f, ensure_ascii=False, indent=2)
            
            # Rewrite the TXT file.
            txt_file = self.knowledge_base_dir / "function_knowledge_base.txt"
            with open(txt_file, 'w', encoding='utf-8') as f:
                # File header explaining the unified MCP call convention.
                header = """=== MCP函数知识库 ===

所有函数都通过统一的MCP工具调用：call_indicator_function
调用格式：call_indicator_function(function_name='函数名称', parameters={参数字典})
注意：db参数会自动注入，无需在parameters中提供

=== 可用函数列表 ===

"""
                f.write(header)
                f.write(''.join(function_list_text_parts))
            
            logger.info(f"已同步更新知识库文件，当前 {len(function_list)} 个函数")
            
        except Exception as e:
            logger.error(f"更新知识库文件失败: {e}")

    def remove_function_embedding(self, function_name: str) -> bool:
        """Remove one function's embedding (and derived entries) from the knowledge base.

        Args:
            function_name: Name of the function to delete.

        Returns:
            bool: True when the function is absent afterwards (including the
            already-missing cases), False when loading or deletion fails.
        """
        try:
            # Nothing to do when no embeddings file exists.
            if not self.embeddings_file.exists():
                logger.warning(f"嵌入文件不存在，无法删除函数 '{function_name}'")
                return True  # a missing file counts as a successful delete
            
            # Load the persisted embeddings.
            try:
                df = pd.read_excel(self.embeddings_file, engine='openpyxl')
                if df.empty:
                    logger.info(f"嵌入文件为空，函数 '{function_name}' 已不存在")
                    return True
                
                # Safely normalize the embedding column to numpy arrays.
                if 'embedding' in df.columns:
                    df["embedding"] = df.embedding.apply(
                        lambda x: np.array(literal_eval(x)) if isinstance(x, str) else np.array(x)
                    )
            except Exception as e:
                logger.error(f"加载嵌入文件失败: {e}")
                return False
            
            # Check whether the function is present at all.
            if function_name not in df['function_name'].values:
                logger.info(f"函数 '{function_name}' 在知识库中不存在，可能已被删除")
                return True  # already absent counts as success
            
            # Drop the matching record(s).
            original_count = len(df)
            df_filtered = df[df['function_name'] != function_name]
            new_count = len(df_filtered)
            
            if original_count == new_count:
                logger.warning(f"函数 '{function_name}' 删除失败，记录数未变化")
                return False
            
            # Persist the updated embeddings.
            if new_count == 0:
                # Last function removed: delete every knowledge-base file.
                files_to_clean = [
                    self.embeddings_file,
                    self.knowledge_base_dir / "function_knowledge_base.json",
                    self.knowledge_base_dir / "function_knowledge_base.txt"
                ]
                
                for file_path in files_to_clean:
                    if file_path.exists():
                        file_path.unlink()
                        logger.info(f"已删除知识库文件: {file_path.name}")
                
                self.functions_df = None
                logger.info(f"已删除最后一个函数 '{function_name}'，知识库已完全清空")
            else:
                # Serialize embeddings back to lists before saving to Excel.
                df_to_save = df_filtered.copy()
                df_to_save["embedding"] = df_to_save.embedding.apply(
                    lambda x: x.tolist() if isinstance(x, np.ndarray) else x
                )
                df_to_save.to_excel(self.embeddings_file, index=False, engine='openpyxl')
                
                # Keep the in-memory DataFrame in sync.
                self.functions_df = df_filtered
                
                # Sync the derived JSON and TXT knowledge-base files.
                self._update_knowledge_base_files()
                
                logger.info(f"成功从知识库中删除函数 '{function_name}'，剩余 {new_count} 个函数，已同步更新知识库文件")
            
            return True
            
        except Exception as e:
            logger.error(f"删除函数嵌入失败: {e}")
            return False


# Module-level singleton instance, created on first access.
rag_kb = None

def get_rag_knowledge_base() -> RAGKnowledgeBase:
    """Return the process-wide RAGKnowledgeBase singleton, creating it on first call.

    Returns:
        The shared RAGKnowledgeBase backed by the shared knowledge-base directory.
    """
    global rag_kb
    if rag_kb is not None:
        return rag_kb

    # Resolve the shared knowledge-base directory relative to the project root.
    project_root = Path(__file__).parent.parent.parent.parent
    knowledge_base_dir = project_root / "shared" / "knowledge_base"
    rag_kb = RAGKnowledgeBase(str(knowledge_base_dir))

    # Eagerly load persisted embeddings when they already exist on disk.
    if rag_kb.embeddings_file.exists():
        try:
            rag_kb.load_function_knowledge_base()
            logger.info("RAG知识库懒加载成功")
        except Exception as e:
            logger.error(f"RAG知识库懒加载失败: {e}")
    return rag_kb


if __name__ == "__main__":
    # Smoke test: exercise the RAG knowledge base end to end.
    kb = RAGKnowledgeBase("./knowledge_base")
    if kb.load_function_knowledge_base():
        print("知识库加载成功")

        # Run a similarity search.
        results = kb.search_relevant_functions("满意度指标数据")
        print(f"搜索结果: {len(results)} 个函数")

        # Build a prompt context from a query.
        context = kb.get_functions_context("获取项目满意度数据")
        print("生成的上下文:")
        print(context)
    else:
        print("知识库加载失败")
