"""
PostgreSQL 数据库连接管理
"""
import os
from datetime import datetime, timezone
from typing import Any, AsyncGenerator, Dict

from sqlalchemy import inspect, text
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker, create_async_engine
from sqlalchemy.orm import declarative_base

# 从环境变量获取数据库配置
POSTGRES_HOST = os.getenv("POSTGRES_HOST", "localhost")
POSTGRES_PORT = os.getenv("POSTGRES_PORT", "5432")
POSTGRES_DATABASE = os.getenv("POSTGRES_DATABASE", "yuxi_know")
POSTGRES_USER = os.getenv("POSTGRES_USER", "yuxi_know")
POSTGRES_PASSWORD = os.getenv("POSTGRES_PASSWORD", "yuxi_know_2025")

# 清理主机名（移除可能的协议前缀）
if POSTGRES_HOST.startswith("http://"):
    POSTGRES_HOST = POSTGRES_HOST[7:]
elif POSTGRES_HOST.startswith("https://"):
    POSTGRES_HOST = POSTGRES_HOST[8:]

# 验证端口号
try:
    port_int = int(POSTGRES_PORT)
    if port_int <= 0 or port_int > 65535:
        raise ValueError(f"端口号 {port_int} 超出有效范围 (1-65535)")
except ValueError as e:
    raise ValueError(f"无效的数据库端口号 '{POSTGRES_PORT}': {e}")

# 构建异步数据库连接 URL
DATABASE_URL = f"postgresql+asyncpg://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_HOST}:{POSTGRES_PORT}/{POSTGRES_DATABASE}"

print(f"[数据库调试] 数据库连接URL: {DATABASE_URL}")  # 调试信息

# Server-side settings applied to every pooled connection.
_SERVER_SETTINGS = {
    "application_name": "yuxi_know_situation",
    "timezone": "UTC",
}

# Shared async engine for the whole application.
engine = create_async_engine(
    DATABASE_URL,
    echo=False,  # switch to True to trace emitted SQL
    pool_size=5,  # small pool to limit connection-reuse problems
    max_overflow=10,
    pool_pre_ping=True,  # validate pooled connections before handing them out
    pool_recycle=3600,  # recycle hourly to avoid stale long-lived connections
    connect_args={"server_settings": _SERVER_SETTINGS},
)

# Factory producing AsyncSession objects bound to the shared engine.
AsyncSessionLocal = async_sessionmaker(
    engine,
    class_=AsyncSession,
    expire_on_commit=False,
    autocommit=False,
    autoflush=False,
)

# Declarative base class for all ORM models in this package.
Base = declarative_base()


async def get_db() -> AsyncGenerator[AsyncSession, None]:
    """Yield a request-scoped database session (FastAPI dependency).

    Commits after the request body completes successfully; rolls back and
    re-raises on any exception; always closes the session.

    Usage:
        @router.get("/")
        async def get_items(db: AsyncSession = Depends(get_db)):
            ...
    """
    session = AsyncSessionLocal()
    try:
        # Pin the search_path so unqualified table names resolve consistently.
        await session.execute(text("SET search_path TO public"))
        yield session
        await session.commit()
    except Exception:
        await session.rollback()
        raise
    finally:
        await session.close()


async def get_db_with_retry(max_retries: int = 3) -> AsyncGenerator[AsyncSession, None]:
    """Yield a database session, retrying on transient connection problems.

    Each attempt pins the search_path and verifies that the
    ``situation_objects`` table exists before yielding; a missing table
    counts as a failed attempt.  On the final attempt ``manual_init_db()``
    is invoked as a last-resort repair.

    NOTE(review): if the consumer raises after the session was yielded, the
    ``except`` branch may loop and yield a *second* session from the same
    generator — dependency-injection frameworks expect exactly one yield, so
    this path likely misbehaves; confirm and restructure if needed.
    NOTE(review): after ``manual_init_db()`` runs, an exception is raised
    unconditionally instead of re-checking the table — verify this is intended.

    Args:
        max_retries: Maximum number of attempts before giving up.
    """
    from src.utils import logger

    async def check_table_exists_local(db_session: AsyncSession) -> bool:
        """Check for the situation_objects table locally (avoids a circular import)."""
        try:
            result = await db_session.execute(text("""
                SELECT EXISTS (
                    SELECT FROM information_schema.tables
                    WHERE table_schema = 'public'
                    AND table_name = 'situation_objects'
                )
            """))
            exists = result.scalar()
            logger.info(f"[调试] situation_objects 表存在性检查: {exists}")
            return exists
        except Exception as e:
            logger.error(f"[调试] 检查表存在性失败: {e}")
            return False

    for attempt in range(max_retries):
        async with AsyncSessionLocal() as session:
            try:
                # Pin the search_path so unqualified names resolve to public.
                await session.execute(text("SET search_path TO public"))

                # Only hand out the session once the critical table is confirmed.
                if await check_table_exists_local(session):
                    yield session
                    await session.commit()
                    return
                else:
                    logger.warning(f"[数据库重试] 第 {attempt + 1} 次尝试：表不存在，尝试重新初始化...")
                    if attempt == max_retries - 1:
                        # Last attempt: force table creation as a final repair step.
                        await manual_init_db()
                        raise Exception("数据库表创建失败，请检查数据库权限和配置")

            except Exception as e:
                logger.warning(f"[数据库重试] 第 {attempt + 1} 次尝试失败: {e}")
                await session.rollback()
                if attempt == max_retries - 1:
                    raise
                else:
                    # Back off briefly (linear) before the next attempt.
                    import asyncio
                    await asyncio.sleep(0.1 * (attempt + 1))


async def init_db():
    """Initialize the database and create all situation-related tables.

    Steps:
      1. Connect to the maintenance ``postgres`` database and create the
         target database if it does not exist yet.
      2. Inspect the ``public`` schema of the target database and create any
         missing situation tables via ``Base.metadata.create_all``.

    Errors are logged but intentionally NOT re-raised, so a broken database
    does not prevent the application from starting.
    """
    from src.utils import logger

    try:
        logger.info(f"检查数据库 {POSTGRES_DATABASE} 是否存在...")

        # CREATE DATABASE cannot run inside a transaction block, so the
        # bootstrap engine must use AUTOCOMMIT isolation (engine.begin()
        # would wrap everything in an explicit transaction and fail).
        temp_url = f"postgresql+asyncpg://{POSTGRES_USER}:{POSTGRES_PASSWORD}@{POSTGRES_HOST}:{POSTGRES_PORT}/postgres"
        temp_engine = create_async_engine(temp_url, echo=False, isolation_level="AUTOCOMMIT")
        try:
            async with temp_engine.connect() as conn:
                # Create the target database only if it is absent.
                result = await conn.execute(
                    text("SELECT 1 FROM pg_database WHERE datname = :db_name"),
                    {"db_name": POSTGRES_DATABASE}
                )
                if not result.scalar():
                    logger.info(f"数据库 {POSTGRES_DATABASE} 不存在，正在创建...")
                    # Identifiers cannot be bound parameters; quote the name to
                    # keep it a single identifier.
                    await conn.execute(
                        text(f'CREATE DATABASE "{POSTGRES_DATABASE}"')
                    )
                    logger.info(f"数据库 {POSTGRES_DATABASE} 创建成功")
                else:
                    logger.info(f"数据库 {POSTGRES_DATABASE} 已存在")
        finally:
            # Always release the bootstrap engine, even on failure.
            await temp_engine.dispose()

        # Connect to the target database and check which tables exist.
        logger.info("检查数据库表是否存在...")

        # Every table the situation feature requires.
        required_tables = {
            'situation_objects',
            'situation_relationships',
            'data_sources',
            'sync_tasks',
            'field_mappings',
            'sync_logs'
        }

        async with engine.begin() as conn:
            tables = await conn.run_sync(lambda sync_conn: inspect(sync_conn).get_table_names(schema='public'))
            logger.info(f"public schema 中的表: {tables}")

            missing_tables = required_tables - set(tables)
            if not missing_tables:
                logger.info("所有态势对象相关表已存在，无需重新创建")
                logger.info(f"已存在的表: {required_tables}")
                return

            logger.info(f"缺少的表: {missing_tables}，开始创建...")

            # Make unqualified DDL target the public schema.
            await conn.execute(text("SET search_path TO public"))

            logger.info("开始在 public schema 中创建表...")

            # Import every model explicitly so it registers on Base.metadata
            # (deferred to avoid circular imports), then pin each table to the
            # public schema before create_all.
            try:
                from src.situation.models import SituationObject
                from src.situation.relationship_models import SituationRelationship
                from src.situation.sync_models import DataSource, SyncTask, FieldMapping, SyncLog

                logger.info("已导入所有态势对象相关模型")

                for _model in (SituationObject, SituationRelationship,
                               DataSource, SyncTask, FieldMapping, SyncLog):
                    _model.__table__.schema = 'public'

                logger.info("已设置所有表schema为public")
                logger.info(f"准备创建的表: {list(Base.metadata.tables.keys())}")

            except Exception as import_error:
                logger.error(f"导入模型时出错: {import_error}")
                raise

            # Create every missing table registered on the metadata.
            await conn.run_sync(Base.metadata.create_all)
            logger.info("表创建SQL执行完成")

            # Verify that creation actually succeeded.
            tables = await conn.run_sync(lambda sync_conn: inspect(sync_conn).get_table_names(schema='public'))
            current_tables = set(tables)

            if required_tables.issubset(current_tables):
                logger.info("✅ 所有态势对象相关表在 public schema 中创建成功")
                logger.info(f"public schema 中的表: {tables}")
            else:
                still_missing = required_tables - current_tables
                logger.error(f"❌ 表创建失败，仍然缺少: {still_missing}")
                logger.error(f"public schema 中的表: {tables}")
                raise Exception(f"表创建失败，缺少: {still_missing}")

    except Exception as e:
        logger.error(f"数据库初始化失败: {e}")
        import traceback
        logger.error(f"详细错误信息: {traceback.format_exc()}")

        # Deliberate best-effort: do not block application startup.
        logger.warning("⚠️ 数据库初始化失败，应用将继续启动，但态势对象功能可能不可用")

        # At least record the current database state for diagnosis.
        try:
            async with engine.begin() as conn:
                tables = await conn.run_sync(lambda sync_conn: inspect(sync_conn).get_table_names())
                logger.info(f"当前数据库中的所有表: {tables}")
        except Exception as debug_error:
            logger.error(f"无法获取表列表: {debug_error}")


async def manual_init_db():
    """Manually (re)initialize the database tables; used for debugging and repair.

    Connects to the target database, logs its current state, and force-creates
    any of the required situation tables that are missing from the ``public``
    schema.  Unlike ``init_db`` this function re-raises on failure.
    """
    from src.utils import logger

    logger.info("🔧 开始手动初始化数据库...")

    try:
        # Connect to the target database.
        async with engine.begin() as conn:
            # Log which database we are actually connected to.
            result = await conn.execute(text("SELECT current_database()"))
            current_db = result.scalar()
            logger.info(f"当前数据库: {current_db}")

            # Tables visible in the default schema.
            tables = await conn.run_sync(lambda sync_conn: inspect(sync_conn).get_table_names())
            logger.info(f"当前数据库中的表: {tables}")

            # Tables specifically in the public schema.
            public_tables = await conn.run_sync(lambda sync_conn: inspect(sync_conn).get_table_names(schema='public'))
            logger.info(f"public schema 中的表: {public_tables}")

            # Every table the situation feature requires.
            required_tables = {
                'situation_objects',
                'situation_relationships',
                'data_sources',
                'sync_tasks',
                'field_mappings',
                'sync_logs'
            }
            existing_tables = set(public_tables)
            missing_tables = required_tables - existing_tables

            # Force-create whatever is missing.
            if missing_tables:
                logger.info(f"🔨 缺少的表: {missing_tables}，强制创建...")

                # Import every model explicitly so it registers on
                # Base.metadata (deferred to avoid circular imports), then pin
                # each table to the public schema before create_all.
                try:
                    from src.situation.models import SituationObject
                    from src.situation.relationship_models import SituationRelationship
                    from src.situation.sync_models import DataSource, SyncTask, FieldMapping, SyncLog

                    logger.info("手动初始化: 已导入所有态势对象相关模型")

                    # Ensure every table targets the public schema.
                    SituationObject.__table__.schema = 'public'
                    SituationRelationship.__table__.schema = 'public'
                    DataSource.__table__.schema = 'public'
                    SyncTask.__table__.schema = 'public'
                    FieldMapping.__table__.schema = 'public'
                    SyncLog.__table__.schema = 'public'

                    logger.info("手动初始化: 已设置所有表schema为public")
                    logger.info(f"手动初始化: 准备创建的表: {list(Base.metadata.tables.keys())}")

                except Exception as import_error:
                    logger.error(f"手动初始化: 导入模型时出错: {import_error}")
                    raise

                await conn.run_sync(Base.metadata.create_all)
                logger.info("✅ 手动初始化: 表创建SQL执行完成")

                # Verify that creation actually succeeded.
                new_tables = await conn.run_sync(lambda sync_conn: inspect(sync_conn).get_table_names(schema='public'))
                new_tables_set = set(new_tables)

                if required_tables.issubset(new_tables_set):
                    logger.info("✅ 所有态势对象相关表创建成功")
                    logger.info(f"public schema 现在包含的表: {new_tables}")
                else:
                    still_missing = required_tables - new_tables_set
                    logger.error(f"❌ 表创建失败，仍然缺少: {still_missing}")
            else:
                logger.info("✅ 所有态势对象相关表已存在")

    except Exception as e:
        logger.error(f"❌ 手动初始化失败: {e}")
        import traceback
        logger.error(f"详细错误: {traceback.format_exc()}")
        raise


async def health_check() -> Dict[str, Any]:
    """Run a database health probe for monitoring.

    Checks basic connectivity, the existence of every situation-related table
    in the ``public`` schema, and connection-pool statistics.

    Returns:
        A dict with per-table boolean flags, pool counters, an
        ``error_message`` (None on success) and an ISO-8601 UTC timestamp.
        Never raises: any failure is captured in ``error_message``.
    """
    from src.utils import logger

    health_status = {
        "database_connected": False,
        "situation_objects_exists": False,
        "situation_relationships_exists": False,
        "data_sources_exists": False,
        "sync_tasks_exists": False,
        "field_mappings_exists": False,
        "sync_logs_exists": False,
        "all_tables_exist": False,
        "connection_pool_size": 0,
        "error_message": None,
        # Timezone-aware UTC timestamp (datetime.utcnow() is deprecated).
        "timestamp": datetime.now(timezone.utc).isoformat()
    }

    try:
        async with engine.begin() as conn:
            # Basic connectivity probe.
            result = await conn.execute(text("SELECT 1"))
            if result.scalar() == 1:
                health_status["database_connected"] = True

            # Existence check for each required table.
            tables = await conn.run_sync(lambda sync_conn: inspect(sync_conn).get_table_names(schema='public'))
            tables_set = set(tables)

            health_status["situation_objects_exists"] = 'situation_objects' in tables_set
            health_status["situation_relationships_exists"] = 'situation_relationships' in tables_set
            health_status["data_sources_exists"] = 'data_sources' in tables_set
            health_status["sync_tasks_exists"] = 'sync_tasks' in tables_set
            health_status["field_mappings_exists"] = 'field_mappings' in tables_set
            health_status["sync_logs_exists"] = 'sync_logs' in tables_set

            # Aggregate flag: all required tables present.
            required_tables = {
                'situation_objects',
                'situation_relationships',
                'data_sources',
                'sync_tasks',
                'field_mappings',
                'sync_logs'
            }
            health_status["all_tables_exist"] = required_tables.issubset(tables_set)

            # Connection-pool statistics.
            pool = engine.pool
            health_status["connection_pool_size"] = pool.size()
            health_status["connections_in_use"] = pool.checkedout()
            health_status["connections_overflow"] = pool.overflow()

        logger.info(f"[健康检查] 数据库状态: {health_status}")

    except Exception as e:
        health_status["error_message"] = str(e)
        logger.error(f"[健康检查] 数据库健康检查失败: {e}")

    return health_status


async def ensure_table_exists():
    """Verify at application startup that all required tables exist.

    Runs a health check; if the database is reachable but tables are missing,
    attempts a repair via ``manual_init_db()``.

    Returns:
        True when the database is reachable and tables are present (possibly
        after repair), False on connection failure or unexpected error.
    """
    from src.utils import logger

    try:
        health = await health_check()

        # Guard clause: unreachable database — nothing else to do.
        if not health["database_connected"]:
            logger.error(f"❌ 数据库连接异常: {health.get('error_message', 'Unknown error')}")
            return False

        # Everything already in place.
        if health["all_tables_exist"]:
            logger.info("✅ 数据库和所有表都正常")
            return True

        # Reachable but incomplete: attempt a repair.
        logger.warning("⚠️ 数据库连接正常但部分表不存在，尝试重新创建...")
        logger.warning(f"   situation_objects 表: {'存在' if health['situation_objects_exists'] else '不存在'}")
        logger.warning(f"   situation_relationships 表: {'存在' if health['situation_relationships_exists'] else '不存在'}")
        await manual_init_db()
        return True

    except Exception as e:
        logger.error(f"❌ 确保表存在失败: {e}")
        return False


async def get_db_session() -> AsyncGenerator[AsyncSession, None]:
    """Yield a database session for internal service-layer use.

    Mirrors ``get_db``: commits on clean completion, rolls back and re-raises
    on error, and always closes the session.
    """
    async with AsyncSessionLocal() as db:
        try:
            # Keep table lookups anchored to the public schema.
            await db.execute(text("SET search_path TO public"))
            yield db
            await db.commit()
        except Exception:
            await db.rollback()
            raise
        finally:
            await db.close()
