# knowledge/database/db_schema_manager.py

import os
import logging
import re
from typing import Dict, Optional, List
from llama_index.core import Document, VectorStoreIndex, StorageContext, Settings
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.embeddings.dashscope import (
    DashScopeEmbedding,
    DashScopeTextEmbeddingModels,
    DashScopeTextEmbeddingType,
)
import chromadb

# 导入日志工具
import sys
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
from knowledge.util.tools import logging_helper

class DBSchemaManager:
    """Database schema manager.

    Responsible for generating the database schema description file,
    vectorizing it (DashScope embeddings stored in ChromaDB, chunked per
    table), and querying / inspecting the resulting collection.
    """

    def __init__(self, chroma_db_path: Optional[str] = None,
                 collection_name: str = "project_sys_schema",
                 api_key: Optional[str] = None,
                 verbose: bool = False):
        """Initialize paths, API key and the embedding model.

        Args:
            chroma_db_path: Directory of the persistent ChromaDB store.
                Defaults to ``<knowledge root>/enterprise_knowledge``.
            collection_name: Name of the ChromaDB collection to manage.
            api_key: DashScope/Bailian API key; falls back to the
                ``BAILIAN_API_KEY`` / ``DASHSCOPE_API_KEY`` env variables.
            verbose: Emit verbose progress logs when True.
        """
        self.logger = logging.getLogger("KnowledgeBase.db_schema_manager")
        self.verbose = verbose

        # Resolve the ChromaDB storage path (default: under the knowledge root)
        if chroma_db_path is None:
            current_dir = os.path.dirname(os.path.abspath(__file__))
            kb_root = os.path.dirname(os.path.dirname(current_dir))  # knowledge root directory
            chroma_db_path = os.path.join(kb_root, "enterprise_knowledge")

        self.chroma_db_path = chroma_db_path
        self.collection_name = collection_name
        self.schema_file_path = self._get_schema_file_path()
        self.api_key = api_key or self._get_api_key()

        # Make sure both storage directories exist
        os.makedirs(self.chroma_db_path, exist_ok=True)
        os.makedirs(os.path.dirname(self.schema_file_path), exist_ok=True)

        # Configure the embedding model (raises if DashScope setup fails)
        self._setup_embedding_model()

        logging_helper.info_if_verbose(self.logger, self.verbose, "数据库Schema管理器初始化完成")
        logging_helper.info_if_verbose(self.logger, self.verbose, f"向量存储路径: {self.chroma_db_path}")
        logging_helper.info_if_verbose(self.logger, self.verbose, f"Schema文件路径: {self.schema_file_path}")
        logging_helper.info_if_verbose(self.logger, self.verbose, "使用嵌入模型: DashScope TEXT_EMBEDDING_V2")

    def _get_api_key(self) -> Optional[str]:
        """Return the API key from env vars (Bailian first, then DashScope)."""
        return os.getenv('BAILIAN_API_KEY') or os.getenv('DASHSCOPE_API_KEY')

    def _get_chroma_client(self):
        """Create a persistent ChromaDB client with telemetry disabled.

        Centralizes the client construction that was previously duplicated
        across vectorize/query/info/delete methods.
        """
        settings = chromadb.config.Settings(anonymized_telemetry=False)
        return chromadb.PersistentClient(
            path=self.chroma_db_path,
            settings=settings
        )

    def _setup_embedding_model(self) -> None:
        """Install DashScope TEXT_EMBEDDING_V2 as the global embed model.

        Raises:
            Exception: re-raised if the DashScope model cannot be configured
                (DashScope is required; no silent fallback).
        """
        if not self.api_key:
            self.logger.warning("API密钥未配置，将使用默认嵌入模型")
            return

        try:
            # TEXT_EMBEDDING_V2 produces 1536-dim vectors; documents and
            # queries must be embedded with the same model/dimension.
            Settings.embed_model = DashScopeEmbedding(
                model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V2,  # key: use the V2 model
                text_type=DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT,
                api_key=self.api_key,
                timeout=30
            )
            # Smoke-test the model and report the embedding dimension
            test_embedding = Settings.embed_model.get_query_embedding("test")
            logging_helper.info_if_verbose(self.logger, self.verbose, f"DashScope嵌入模型设置成功，测试向量维度: {len(test_embedding)}")
        except Exception as e:
            self.logger.error(f"设置DashScope嵌入模型失败: {e}")
            # DashScope is mandatory here — propagate instead of degrading
            raise

    def _get_schema_file_path(self) -> str:
        """Return the path of the latest schema markdown file (project root/database/)."""
        current_dir = os.path.dirname(os.path.abspath(__file__))
        root_dir = os.path.dirname(os.path.dirname(current_dir))  # project root directory
        return os.path.join(root_dir, "database", "database_schema_latest.md")

    def _split_schema_by_tables(self, schema_content: str) -> List[Dict]:
        """Split the schema markdown into per-table chunks.

        Matches each table section up to the next table header, the global
        relationship section, or end-of-document (avoids truncated chunks),
        and prepends a global-info chunk / appends a relationships chunk
        when those sections are present.

        Args:
            schema_content: Full text of the schema markdown file.

        Returns:
            List of dicts with table_name / description / business_meaning /
            content / full_content keys.
        """
        tables = []

        # Non-greedy match up to the next table header, the relationship
        # section, or end of document — ensures fields/relations are complete.
        table_pattern = r'---\s*\n## 📊 表: `(\w+)`(.*?)(?=---\s*\n## 📊 表:|## 🔗|\Z)'
        matches = re.findall(table_pattern, schema_content, re.DOTALL)

        for table_name, table_content in matches:
            table_content = table_content.strip()

            # Extract the table description
            description_match = re.search(r'\*\*描述\*\*: (.*?)\n', table_content)
            description = description_match.group(1).strip() if description_match else ""

            # Extract the business meaning (falls back to the description)
            business_meaning_match = re.search(r'\*\*业务含义\*\*: (.*?)\n', table_content)
            business_meaning = business_meaning_match.group(1).strip() if business_meaning_match else description

            # Make sure the field table is fully included in the chunk
            fields_start = table_content.find("### 字段结构")
            if fields_start != -1:
                fields_content = table_content[fields_start:]
            else:
                fields_content = "### 字段结构\n\n未找到字段信息"

            full_table_content = f"## 📊 表: `{table_name}`\n\n**描述**: {description}\n\n**业务含义**: {business_meaning}\n\n{fields_content}"

            tables.append({
                "table_name": table_name,
                "description": description,
                "business_meaning": business_meaning,
                "content": table_content,
                "full_content": full_table_content  # guaranteed to include the full field table
            })

        # Global-info chunk: the document header up to the first separator
        global_info_match = re.search(r'# 企业知识库数据库结构说明(.*?)---', schema_content, re.DOTALL)
        if global_info_match:
            global_info = global_info_match.group(1).strip()
            tables.insert(0, {
                "table_name": "global_info",
                "description": "数据库全局信息",
                "business_meaning": "数据库整体结构和元数据信息",
                "content": global_info,
                "full_content": f"# 企业知识库数据库结构说明\n\n{global_info}"
            })

        # Relationship-diagram chunk: everything after the 🔗 header
        relationship_match = re.search(r'## 🔗 全局表关系图(.*?)\Z', schema_content, re.DOTALL)
        if relationship_match:
            relationship_info = relationship_match.group(1).strip()
            tables.append({
                "table_name": "relationships",
                "description": "全局表关系图",
                "business_meaning": "数据库表之间的关联关系",
                "content": relationship_info,
                "full_content": f"## 🔗 全局表关系图\n\n{relationship_info}"
            })

        logging_helper.info_if_verbose(self.logger, self.verbose, f"Schema分割完成，共 {len(tables)} 个块")
        return tables

    def refresh_schema(self, db_manager) -> bool:
        """Regenerate the database schema description files.

        Args:
            db_manager: DatabaseManager instance providing ``getDBSchema()``.

        Returns:
            bool: True on success, False on failure.
        """
        try:
            logging_helper.info_if_verbose(self.logger, self.verbose, "开始刷新数据库Schema描述文件...")

            # Generate the schema files; only the "latest" variants are used
            _json_file, latest_json_file, _text_file, latest_text_file = db_manager.getDBSchema()

            logging_helper.info_if_verbose(self.logger, self.verbose, "数据库Schema刷新完成:")
            logging_helper.info_if_verbose(self.logger, self.verbose, f"  JSON格式: {latest_json_file}")
            logging_helper.info_if_verbose(self.logger, self.verbose, f"  文本格式: {latest_text_file}")

            # Point subsequent vectorization at the freshly generated file
            self.schema_file_path = latest_text_file

            return True

        except Exception as e:
            self.logger.error(f"刷新数据库Schema失败: {e}")
            return False

    def vectorize_schema(self) -> bool:
        """Vectorize the schema file into ChromaDB, one chunk per table.

        Recreates the collection from scratch (drop + create), builds one
        Document per chunk with rich metadata, and indexes them with the
        globally configured DashScope embedding model.

        Returns:
            bool: True on success, False on failure.
        """
        try:
            if not os.path.exists(self.schema_file_path):
                self.logger.error(f"Schema文件不存在: {self.schema_file_path}")
                return False

            with open(self.schema_file_path, 'r', encoding='utf-8') as f:
                schema_content = f.read()

            if not schema_content.strip():
                self.logger.error("Schema文件内容为空")
                return False

            logging_helper.info_if_verbose(self.logger, self.verbose, f"开始向量化数据库Schema，文件: {self.schema_file_path}")

            # Split into per-table semantic chunks
            table_chunks = self._split_schema_by_tables(schema_content)
            if not table_chunks:
                self.logger.error("未能成功分割Schema内容")
                return False

            logging_helper.info_if_verbose(self.logger, self.verbose, f"Schema分割为 {len(table_chunks)} 个语义块")

            # Disable ChromaDB telemetry (belt-and-braces with client settings)
            os.environ['ANONYMIZED_TELEMETRY'] = 'False'

            chroma_client = self._get_chroma_client()

            # Drop any existing collection so the index is rebuilt cleanly
            try:
                chroma_client.delete_collection(self.collection_name)
                logging_helper.info_if_verbose(self.logger, self.verbose, f"删除已存在的集合: {self.collection_name}")
            except Exception:
                logging_helper.info_if_verbose(self.logger, self.verbose, f"集合不存在，将创建新集合: {self.collection_name}")

            collection = chroma_client.create_collection(
                name=self.collection_name,
                metadata={
                    "hnsw:space": "cosine", 
                    "description": "数据库系统Schema向量存储",
                    "type": "database_schema",
                    "embedding_model": "DashScope_TEXT_EMBEDDING_V2",
                    "chunk_strategy": "table_based"
                }
            )

            # One Document per chunk, carrying filterable metadata
            documents = []
            for i, chunk in enumerate(table_chunks):
                doc = Document(
                    text=chunk["full_content"],
                    metadata={
                        "file_name": "database_schema_latest.md",
                        "file_type": "database_schema",
                        "collection": self.collection_name,
                        "description": chunk["description"],
                        "business_meaning": chunk["business_meaning"],
                        "table_name": chunk["table_name"],
                        "chunk_id": i,
                        # chunk_type is "table" for real tables, otherwise the
                        # special chunk's own name (global_info / relationships)
                        "chunk_type": "table" if chunk["table_name"] not in ["global_info", "relationships"] else chunk["table_name"],
                        "source": "database_schema",
                        "embedding_model": "DashScope_TEXT_EMBEDDING_V2"
                    }
                )
                documents.append(doc)
                logging_helper.info_if_verbose(self.logger, self.verbose, f"创建文档块: {chunk['table_name']} (长度: {len(chunk['full_content'])} 字符)")

            # Build the vector index backed by the Chroma collection
            vector_store = ChromaVectorStore(chroma_collection=collection)
            storage_context = StorageContext.from_defaults(vector_store=vector_store)

            index = VectorStoreIndex.from_documents(
                documents,
                storage_context=storage_context,
                embed_model=Settings.embed_model  # explicitly use our DashScope embed model
            )

            # Verify the documents actually landed in the collection
            doc_count = collection.count()
            logging_helper.info_if_verbose(self.logger, self.verbose, f"数据库Schema向量化完成，存储文档数量: {doc_count}")

            if hasattr(Settings.embed_model, 'model_name'):
                logging_helper.info_if_verbose(self.logger, self.verbose, f"使用的嵌入模型: {Settings.embed_model.model_name}")

            # Chunk statistics for the verbose log
            table_chunks_count = len([d for d in documents if d.metadata.get("chunk_type") == "table"])
            global_chunks_count = len([d for d in documents if d.metadata.get("chunk_type") == "global_info"])
            relationship_chunks_count = len([d for d in documents if d.metadata.get("chunk_type") == "relationships"])

            logging_helper.info_if_verbose(self.logger, self.verbose, f"分块统计: 表块={table_chunks_count}, 全局信息={global_chunks_count}, 关系图={relationship_chunks_count}")

            return True

        except Exception as e:
            self.logger.error(f"数据库Schema向量化失败: {e}")
            import traceback
            traceback.print_exc()
            return False

    def query_schema(self, query_text: str, filters: Optional[Dict] = None, n_results: int = 5) -> Dict:
        """Query the vectorized schema collection.

        Args:
            query_text: Natural-language query text.
            filters: Optional metadata filters; supported keys are
                file_type, source, table_name and chunk_type.
            n_results: Maximum number of results to return.

        Returns:
            dict: Query results, or {"error": ...} on failure.
        """
        try:
            chroma_client = self._get_chroma_client()

            try:
                collection = chroma_client.get_collection(self.collection_name)
            except Exception as e:
                self.logger.error(f"获取集合失败: {e}")
                return {"error": f"集合 {self.collection_name} 不存在"}

            # Translate the supported filter keys into a Chroma "where" clause
            where_filter = {}
            if filters:
                for key in ("file_type", "source", "table_name", "chunk_type"):
                    if key in filters:
                        where_filter[key] = {"$eq": filters[key]}

            # Important: the query vector must come from the same embedding
            # model that produced the stored document vectors.
            try:
                if hasattr(Settings, 'embed_model') and Settings.embed_model:
                    # BUGFIX: accessing `.model_name` unguarded raised
                    # AttributeError for embed models without that attribute,
                    # wrongly forcing the text-query fallback path.
                    model_name = getattr(Settings.embed_model, 'model_name', 'unknown_embedding_model')
                    logging_helper.info_if_verbose(self.logger, self.verbose, f"使用嵌入模型生成查询向量: {model_name}")

                    query_embedding = Settings.embed_model.get_query_embedding(query_text)

                    results = collection.query(
                        query_embeddings=[query_embedding],
                        n_results=n_results,
                        where=where_filter if where_filter else None
                    )

                    embedding_model_used = model_name
                else:
                    raise Exception("嵌入模型未配置")

            except Exception as embed_error:
                self.logger.warning(f"使用嵌入向量查询失败，回退到文本查询: {embed_error}")
                # Fallback: let Chroma embed the raw text with its default model
                results = collection.query(
                    query_texts=[query_text],
                    n_results=n_results,
                    where=where_filter if where_filter else None
                )
                embedding_model_used = "text_query_fallback"

            return {
                "query": query_text,
                "results": results,
                "collection": self.collection_name,
                "embedding_model": embedding_model_used,
                "chunk_strategy": "table_based",
                "total_results": len(results.get('documents', [])[0]) if results.get('documents') else 0
            }

        except Exception as e:
            self.logger.error(f"查询数据库Schema失败: {e}")
            return {"error": str(e)}

    def get_schema_info(self) -> Dict:
        """Collect status information about the schema file and collection.

        Returns:
            dict: Paths, existence flags, document/chunk counts, per-table
                metadata and (when parseable) a textual database description,
                or {"error": ...} on failure.
        """
        try:
            chroma_client = self._get_chroma_client()

            info = {
                "schema_file_path": self.schema_file_path,
                "schema_file_exists": os.path.exists(self.schema_file_path),
                "collection_name": self.collection_name,
                "chroma_db_path": self.chroma_db_path,
                "embedding_model": "DashScope_TEXT_EMBEDDING_V2",
                "chunk_strategy": "table_based"
            }

            # Collection-side info (existence, counts, chunk statistics)
            try:
                collection = chroma_client.get_collection(self.collection_name)
                info["collection_exists"] = True
                info["document_count"] = collection.count()

                try:
                    all_metadatas = collection.get()["metadatas"]
                    if all_metadatas:
                        table_chunks = len([m for m in all_metadatas if m.get("chunk_type") == "table"])
                        global_chunks = len([m for m in all_metadatas if m.get("chunk_type") == "global_info"])
                        relationship_chunks = len([m for m in all_metadatas if m.get("chunk_type") == "relationships"])
                        info["chunk_statistics"] = {
                            "table_chunks": table_chunks,
                            "global_info_chunks": global_chunks,
                            "relationship_chunks": relationship_chunks,
                            "total_chunks": len(all_metadatas)
                        }

                        # Name + description for every stored table chunk
                        table_info_list = []
                        for metadata in all_metadatas:
                            if metadata.get("chunk_type") == "table":
                                table_info_list.append({
                                    "table_name": metadata.get("table_name", ""),
                                    "description": metadata.get("description", "")
                                })

                        info["tables"] = table_info_list

                except Exception as e:
                    self.logger.warning(f"获取分块统计信息失败: {e}")

            except Exception:
                info["collection_exists"] = False
                info["document_count"] = 0

            # File-side info: stats plus details parsed from the markdown
            if info["schema_file_exists"]:
                file_stats = os.stat(self.schema_file_path)
                info["file_size"] = file_stats.st_size
                info["last_modified"] = file_stats.st_mtime

                try:
                    with open(self.schema_file_path, 'r', encoding='utf-8') as f:
                        schema_content = f.read()

                    # Global header: generation time, db path, table count, field count
                    global_info_match = re.search(
                        r'- \*\*生成时间\*\*: (.*?)\n' +
                        r'- \*\*数据库路径\*\*: (.*?)\n' +
                        r'- \*\*表数量\*\*: (.*?)\n' +
                        r'- \*\*总字段数\*\*: (.*?)\n',
                        schema_content
                    )

                    if global_info_match:
                        info["global_info"] = {
                            "generate_time": global_info_match.group(1).strip(),
                            "database_path": global_info_match.group(2).strip(),
                            "table_count": int(global_info_match.group(3).strip()),
                            "total_fields": int(global_info_match.group(4).strip())
                        }

                    # Table name + description for every table in the file
                    table_pattern = r'---\s*\n## 📊 表: `(\w+)`(.*?)(?=---|## 🔗|\Z)'
                    matches = re.findall(table_pattern, schema_content, re.DOTALL)

                    tables_from_file = []
                    for table_name, table_content in matches:
                        description_match = re.search(r'\*\*描述\*\*: (.*?)\n', table_content)
                        description = description_match.group(1).strip() if description_match else ""

                        tables_from_file.append({
                            "table_name": table_name,
                            "description": description
                        })

                    info["tables_from_file"] = tables_from_file

                    # Human-readable summary combining header + table list
                    if global_info_match and tables_from_file:
                        database_description = (
                            f"数据库总体信息：\n"
                            f"- 生成时间: {global_info_match.group(1).strip()}\n"
                            f"- 数据库路径: {global_info_match.group(2).strip()}\n"
                            f"- 表数量: {global_info_match.group(3).strip()}\n"
                            f"- 总字段数: {global_info_match.group(4).strip()}\n\n"
                            f"所有数据表详情：\n"
                        )

                        for i, table in enumerate(tables_from_file, 1):
                            database_description += f"{i}. {table['table_name']}: {table['description']}\n"

                        info["database_description"] = database_description

                except Exception as e:
                    self.logger.warning(f"从Schema文件解析详细信息失败: {e}")

            return info

        except Exception as e:
            self.logger.error(f"获取Schema信息失败: {e}")
            return {"error": str(e)}

    def delete_schema_collection(self) -> bool:
        """Delete the schema collection (treats "already absent" as success).

        Returns:
            bool: True unless the ChromaDB client itself fails.
        """
        try:
            chroma_client = self._get_chroma_client()

            try:
                chroma_client.delete_collection(self.collection_name)
                logging_helper.info_if_verbose(self.logger, self.verbose, f"成功删除集合: {self.collection_name}")
                return True
            except Exception:
                logging_helper.info_if_verbose(self.logger, self.verbose, f"集合不存在，无需删除: {self.collection_name}")
                return True

        except Exception as e:
            self.logger.error(f"删除Schema集合失败: {e}")
            return False