from __future__ import annotations

import logging
import re
from threading import Lock
from typing import Dict, TYPE_CHECKING
from urllib.parse import quote_plus

from sqlalchemy import (
    create_engine,
    Table,
    Column,
    Integer,
    String,
    JSON,
    MetaData,
    delete,
    inspect,
    text,
)
from sqlalchemy.ext.asyncio import create_async_engine
from pgvector.sqlalchemy import Vector
from llama_index.vector_stores.postgres import PGVectorStore as NativePGVectorStore

from axiom_boot.conf.manager import Settings
from axiom_boot.di.decorators import service, conditional_on_setting
from axiom_boot.vector.store.pgvector_adapter import PGVectorStoreAdapter

if TYPE_CHECKING:
    from axiom_boot.vector.store.vector_store_interface import VectorStoreInterface


# Module-level logger named after this module, per the standard logging convention.
logger = logging.getLogger(__name__)


@service()
@conditional_on_setting(key="vector__store__provider", expected_value="pgvector")
class PGVectorStoreManager:
    """Manages lazily-created pgvector-backed vector stores, one per collection.

    The manager owns dedicated sync/async SQLAlchemy engines built from the
    ``vector.pgvector`` settings block, independent of any main application
    database. Store adapters are cached per collection and shared by callers.
    """

    # Cache of collection name -> store adapter; writes are guarded by _lock.
    _stores: Dict[str, "VectorStoreInterface"]

    def __init__(self, settings: Settings):
        """Initialize internal state and build the dedicated database engines.

        Args:
            settings: Application settings; connection parameters are read
                from ``settings.vector.pgvector``.

        Raises:
            ValueError: If required pgvector connection settings are missing.
        """
        self._settings = settings
        self._stores = {}
        self._lock = Lock()
        self._engine = None
        self._async_engine = None
        self._initialize_engines()

    def _initialize_engines(self):
        """Create independent sync and async engines for the vector database.

        Raises:
            ValueError: If user/password/host/database are not all configured.
        """
        # Read the pgvector-specific configuration block.
        pg_settings = self._settings.vector.pgvector
        if not all([pg_settings.user, pg_settings.password, pg_settings.host, pg_settings.database]):
            raise ValueError("向量数据库的 user, password, host, database 必须在配置中完整设置 (在 vector.pgvector 下)")

        # URL-encode the credentials so reserved characters (e.g. '@', ':',
        # '/', '%') in the user name or password cannot corrupt the DSN.
        user = quote_plus(str(pg_settings.user))
        password = quote_plus(str(pg_settings.password))
        vector_db_url = (
            f"postgresql://{user}:{password}@"
            f"{pg_settings.host}:{pg_settings.port}/{pg_settings.database}"
        )

        # Dedicated synchronous engine.
        self._engine = create_engine(vector_db_url)

        # Dedicated asynchronous engine (asyncpg driver).
        async_db_url = vector_db_url.replace("postgresql://", "postgresql+asyncpg://")
        self._async_engine = create_async_engine(async_db_url)

    @staticmethod
    def _table_prefix(prefix: str) -> str:
        """Return the table-name prefix derived from a collection prefix.

        Mirrors LlamaIndex's PGVectorStore convention: prepend ``data_``,
        lower-case the name, and normalize characters outside ``[a-z0-9_]``
        to ``_``.
        """
        return f"data_{re.sub(r'[^a-z0-9_]', '_', prefix.lower())}"

    @staticmethod
    def _escape_like(literal: str) -> str:
        """Escape SQL LIKE metacharacters (``\\``, ``%``, ``_``) in *literal*.

        Without this, the ``_`` characters produced by sanitization act as
        single-character wildcards and could match (and delete) unrelated
        tables.
        """
        return literal.replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")

    def get_store(self, collection_name: str, embed_dim: int) -> "VectorStoreInterface":
        """Return the cached store for *collection_name*, creating it if needed."""
        # Double-checked locking: the unlocked read is safe for CPython dicts,
        # and the locked re-check prevents duplicate store creation.
        if collection_name not in self._stores:
            with self._lock:
                if collection_name not in self._stores:
                    self._stores[collection_name] = self._create_store(collection_name, embed_dim)
        return self._stores[collection_name]

    async def create_collection_if_not_exists(self, collection_name: str, embed_dim: int) -> None:
        """Ensure the collection exists; get_store performs the actual creation."""
        self.get_store(collection_name, embed_dim)

    async def delete_collection(self, collection_name: str) -> None:
        """Drop the backing table for *collection_name* and evict its cached store.

        Raises:
            RuntimeError: If the engines were never initialized.
        """
        if not self._engine:
            raise RuntimeError("数据库引擎未被正确初始化。")

        table_name = f"data_{collection_name}"
        metadata = MetaData()
        table = Table(table_name, metadata)

        with self._engine.connect() as connection:
            with connection.begin():
                # checkfirst=True makes the drop a no-op if the table is absent.
                table.drop(connection, checkfirst=True)

        # Evict the stale adapter from the cache.
        with self._lock:
            if collection_name in self._stores:
                del self._stores[collection_name]

    async def delete_collections_with_prefix(self, prefix: str):
        """Discover and drop every vector table whose name matches *prefix*.

        This is the correct way to clean up all related tables created by a
        multi-path retrieval strategy.

        Raises:
            RuntimeError: If the engines were never initialized.
        """
        if not self._engine:
            raise RuntimeError("数据库引擎未被正确初始化。")

        safe_prefix = self._table_prefix(prefix)
        # Escape LIKE wildcards so sanitized underscores match literally.
        like_pattern = f"{self._escape_like(safe_prefix)}%"

        with self._engine.connect() as connection:
            with connection.begin():
                try:
                    # 1. Find all matching table names in information_schema.
                    find_tables_stmt = text(
                        "SELECT table_name FROM information_schema.tables "
                        "WHERE table_schema = 'public' AND table_name LIKE :prefix ESCAPE '\\'"
                    )
                    result = connection.execute(find_tables_stmt, {"prefix": like_pattern})
                    tables_to_drop = [row[0] for row in result.fetchall()]

                    if not tables_to_drop:
                        logger.info(f"未找到任何以 '{safe_prefix}' 开头的向量表，无需删除。")
                        return

                    # 2. Drop each table that was found.
                    for table_name in tables_to_drop:
                        # Quote the identifier to survive special characters.
                        drop_stmt = text(f'DROP TABLE public."{table_name}" CASCADE;')
                        connection.execute(drop_stmt)
                        logger.info(f"已成功删除向量表: '{table_name}'")

                except Exception as e:
                    logger.error(f"根据前缀 '{safe_prefix}' 删除向量表时发生数据库错误: {e}", exc_info=True)
                    raise

        # Evict cached stores whose backing tables were just dropped, so later
        # get_store calls cannot hand out adapters over nonexistent tables.
        with self._lock:
            stale = [
                name for name in self._stores
                if self._table_prefix(name).startswith(safe_prefix)
            ]
            for name in stale:
                del self._stores[name]

    async def list_collections_with_prefix(self, prefix: str) -> list[str]:
        """Discover the names of all vector collections matching *prefix*.

        Used to auto-discover every path of a multi-route retrieval setup.
        Returns an empty list on database errors so callers never see None.

        Raises:
            RuntimeError: If the engines were never initialized.
        """
        if not self._engine:
            raise RuntimeError("数据库引擎未被正确初始化。")

        # PGVectorStore derives table names by prefixing "data_" and
        # sanitizing/lower-casing the collection name; mirror that here to
        # find the right tables.
        safe_prefix = self._table_prefix(prefix)
        like_pattern = f"{self._escape_like(safe_prefix)}%"

        collection_names = []
        with self._engine.connect() as connection:
            try:
                find_tables_stmt = text(
                    "SELECT table_name FROM information_schema.tables "
                    "WHERE table_schema = 'public' AND table_name LIKE :prefix ESCAPE '\\'"
                )
                result = connection.execute(find_tables_stmt, {"prefix": like_pattern})
                table_names = [row[0] for row in result.fetchall()]

                # Convert table names back to collection names (strip "data_").
                for table_name in table_names:
                    if table_name.startswith("data_"):
                        collection_names.append(table_name[len("data_"):])

                logger.debug(f"为前缀 '{prefix}' 找到了 {len(collection_names)} 个集合: {collection_names}")
                return collection_names

            except Exception as e:
                logger.error(f"根据前缀 '{prefix}' 查找集合时发生数据库错误: {e}", exc_info=True)
                # Return an empty list even on failure so callers avoid TypeError.
                return []

    async def collection_exists(self, collection_name: str) -> bool:
        """Return True if the backing table for *collection_name* exists.

        Raises:
            RuntimeError: If the engines were never initialized.
        """
        if not self._engine:
            raise RuntimeError("数据库引擎未被正确初始化。")

        table_name = f"data_{collection_name}"
        with self._engine.connect() as connection:
            # inspect() is the public SQLAlchemy reflection API;
            # Dialect.has_table is documented as internal-use only.
            return inspect(connection).has_table(table_name)

    def _create_store(self, collection_name: str, embed_dim: int) -> "VectorStoreInterface":
        """Create a PGVectorStore-backed adapter for *collection_name*.

        The native store is handed the dedicated engines and creates a table
        matching its own internal conventions, which guarantees compatibility
        with its ORM model (no hand-built Table objects that could conflict).

        Raises:
            RuntimeError: If the engines were never initialized.
        """
        if not (self._engine and self._async_engine):
            raise RuntimeError("数据库引擎未被正确初始化。")

        # LlamaIndex handles table creation and ORM mapping itself.
        native_store = NativePGVectorStore(
            engine=self._engine,
            async_engine=self._async_engine,
            table_name=collection_name,
            embed_dim=embed_dim,
            hybrid_search=True,  # explicitly enable hybrid-search support
            text_search_config="simple",  # 'simple' is the most language-agnostic config
        )

        adapter = PGVectorStoreAdapter(
            native_store=native_store,
            collection_name=collection_name
        )
        return adapter