import logging
import os
from typing import Optional, Set

from sqlalchemy import create_engine, text
from sqlalchemy.orm import sessionmaker, declarative_base

from database.serializer import SQLAlchemySerializer
from database.settings import settings
from logger import logger

# Declarative base class used for model definitions
Base = declarative_base()

# Default engine/pool configuration shared by every engine created in this module
DEFAULT_DB_CONFIG = {
    "pool_pre_ping": True,          # validate connections before handing them out
    "pool_recycle": 28000,          # recycle after ~7h53m (just under MySQL's default 8h idle timeout)
    "pool_size": 5,                 # base connection-pool size
    "max_overflow": 10,             # max connections allowed beyond pool_size
    "connect_args": {"connect_timeout": 10}
}


def configure_sql_logging():
    """Configure SQLAlchemy log output according to the project settings.

    Reads ``settings.get_sql_log_config()`` and, when SQL logging is
    enabled, attaches console (and optionally file) handlers to the
    ``sqlalchemy.engine`` and ``sqlalchemy.pool`` loggers.

    Bug fix: previously, if creating the file handler failed, the later
    pool-logger setup still referenced ``file_handler`` and raised a
    ``NameError``. ``file_handler`` is now initialized to ``None`` and
    only attached when it was actually created.
    """
    sql_log_config = settings.get_sql_log_config()

    if not sql_log_config["enabled"]:
        return

    # "detail" mode emits statement parameters etc. at DEBUG level.
    log_level = logging.DEBUG if sql_log_config["detail"] else logging.INFO

    # Engine logger: statements and execution info.
    engine_logger = logging.getLogger('sqlalchemy.engine')
    engine_logger.setLevel(log_level)
    engine_logger.propagate = False  # do not duplicate records via the root logger

    # Console handler shared by engine and pool loggers.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(log_level)
    console_formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(name)s: %(message)s')
    console_handler.setFormatter(console_formatter)

    # Drop any handlers left over from a previous call (idempotent setup).
    engine_logger.handlers = []
    engine_logger.addHandler(console_handler)

    # Optional file handler; None when not configured or creation failed.
    file_handler = None
    if sql_log_config["log_file"]:
        try:
            # Make sure the target directory exists.
            log_dir = os.path.dirname(sql_log_config["log_file"])
            if log_dir:
                os.makedirs(log_dir, exist_ok=True)

            file_handler = logging.FileHandler(sql_log_config["log_file"])
            file_handler.setLevel(log_level)
            file_formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(message)s')
            file_handler.setFormatter(file_formatter)
            engine_logger.addHandler(file_handler)
            logging.info(f"SQL日志将输出到: {sql_log_config['log_file']}")
        except Exception as e:
            # Best-effort: fall back to console-only logging.
            logging.error(f"无法创建SQL日志文件 {sql_log_config['log_file']}: {str(e)}")

    # Pool logger (checkout/checkin events), console + optional file.
    pool_logger = logging.getLogger('sqlalchemy.pool')
    pool_logger.setLevel(logging.INFO)
    pool_logger.handlers = []
    pool_logger.addHandler(console_handler)
    if file_handler is not None:
        pool_logger.addHandler(file_handler)


def create_database_engine(database_url: Optional[str] = None):
    """Create a SQLAlchemy engine using the project's pooling defaults.

    Args:
        database_url: Database URL; when omitted, falls back to
            ``settings.DATABASE_URL``.

    Returns:
        A configured SQLAlchemy ``Engine`` built with ``DEFAULT_DB_CONFIG``.
    """
    # Apply the SQL logging configuration before the engine starts emitting logs.
    configure_sql_logging()

    # Fall back to the URL from the project settings.
    if database_url is None:
        database_url = settings.DATABASE_URL

    return create_engine(database_url, **DEFAULT_DB_CONFIG)


# Global engine instance shared across the application
engine = create_database_engine()

class DBSessionManager:
    """
    Database session manager that addresses common connection problems.

    Features:
    - Automatic handling of stale/timed-out connections (pool_pre_ping, pool_recycle)
    - Optional automatic serialization of results, avoiding
      "Instance is not bound to a Session" errors after the session closes
    - Batch-processing helpers optimized for large data sets
    - Flexible transaction control (auto-commit or manual)
    - Detailed error logging

    Usage:
        with DBSessionManager() as db:
            rows = db.all(db.query(Model).filter(...))
    """

    def __init__(self,
                 engine=None,
                 auto_commit: bool = True,
                 serialize_results: bool = True,
                 datetime_format: Optional[str] = None,
                 exclude_fields: Optional[Set[str]] = None,
                 batch_size: int = 500):
        """
        Initialize the DBSessionManager.

        Args:
            engine: Optional custom engine; the module-level shared engine
                is used when omitted.
            auto_commit: Commit automatically when the context exits cleanly.
            serialize_results: Serialize query results automatically.
            datetime_format: Datetime format override for the serializer.
            exclude_fields: Set of field names to exclude when serializing.
            batch_size: Default batch size for the batch helpers.
        """
        self.engine = engine or self._get_default_engine()
        self.auto_commit = auto_commit
        self.serialize_results = serialize_results
        self.datetime_format = datetime_format
        self.exclude_fields = exclude_fields
        self.batch_size = batch_size
        self._serializer = None  # built lazily by _get_serializer()
        self.session = None      # bound inside the context manager

    def _get_default_engine(self):
        """Return the shared module-level engine.

        Fix: previously this created a brand-new engine (and therefore a
        whole new connection pool) for every DBSessionManager instance,
        ignoring the module-level ``engine`` created as the shared
        instance. Reusing the global engine avoids pool proliferation
        while keeping identical connection settings.
        """
        return engine

    def _get_serializer(self):
        """Lazily build the serializer, applying instance-level overrides."""
        if self._serializer is None:
            kwargs = {}
            if self.datetime_format:
                kwargs['datetime_format'] = self.datetime_format
            if self.exclude_fields:
                kwargs['exclude_fields'] = set(self.exclude_fields)

            self._serializer = SQLAlchemySerializer(**kwargs)
        return self._serializer

    def __enter__(self):
        """Open a new session bound to this manager's engine."""
        SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=self.engine)
        self.session = SessionLocal()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        """Commit or roll back as appropriate, then always close the session.

        Returns:
            False, so any exception raised inside the context propagates to
            the caller after the rollback has been logged.
        """
        try:
            if exc_type:
                self.session.rollback()
                logger.error(f"数据库事务回滚: {exc_val}")
                return False
            elif self.auto_commit:
                self.session.commit()
        except Exception as e:
            logger.error(f"事务提交失败: {str(e)}")
            self.session.rollback()
            raise
        finally:
            # Always return the connection to the pool.
            self.session.close()
            self.session = None

        return False

    def query(self, *entities):
        """Return a Query object over the given entities."""
        return self.session.query(*entities)

    def all(self, query):
        """Execute the query and return all results (serialized if enabled)."""
        results = query.all()
        if self.serialize_results:
            return self._get_serializer().serialize(results)
        return results

    def first(self, query):
        """Return the first result, or None (serialized if enabled)."""
        result = query.first()
        if self.serialize_results and result is not None:
            return self._get_serializer().serialize(result)
        return result

    def scalar(self, query):
        """Return a scalar result (never serialized)."""
        return query.scalar()

    def execute(self, *args, **kwargs):
        """Execute a statement directly on the session (never serialized)."""
        return self.session.execute(*args, **kwargs)

    def add(self, obj):
        """Add a single object to the session."""
        self.session.add(obj)

    def add_all(self, objects):
        """Add multiple objects to the session."""
        self.session.add_all(objects)

    def bulk_save_objects(self, objects, return_defaults=False, update_changed_only=True):
        """Bulk-save objects (memory-optimized, skips most ORM events)."""
        self.session.bulk_save_objects(objects,
                                       return_defaults=return_defaults,
                                       update_changed_only=update_changed_only)

    def bulk_insert_mappings(self, entity, mappings, return_defaults=False, render_nulls=False):
        """Bulk-insert from plain dict mappings (more efficient than objects)."""
        self.session.bulk_insert_mappings(entity, mappings,
                                          return_defaults=return_defaults,
                                          render_nulls=render_nulls)

    def bulk_update_mappings(self, entity, mappings):
        """Bulk-update from plain dict mappings (each must contain the PK)."""
        self.session.bulk_update_mappings(entity, mappings)

    def commit(self):
        """Commit the current transaction."""
        self.session.commit()

    def rollback(self):
        """Roll back the current transaction."""
        self.session.rollback()

    def refresh(self, obj):
        """Reload the object's state from the database."""
        self.session.refresh(obj)

    def expunge(self, obj):
        """Detach the object from the session."""
        self.session.expunge(obj)

    def expunge_all(self):
        """Detach all objects from the session."""
        self.session.expunge_all()

    # ===== Batch-processing helpers =====
    def batch_insert(self, entity, objects, batch_size=None):
        """
        Insert ORM objects in batches.

        Args:
            entity: Entity class (unused here; kept for API compatibility).
            objects: List of ORM objects to insert.
            batch_size: Batch size; defaults to the instance setting.

        Returns:
            int: Number of records inserted.
        """
        batch_size = batch_size or self.batch_size
        total = len(objects)
        inserted = 0

        for i in range(0, total, batch_size):
            batch = objects[i:i + batch_size]
            self.bulk_save_objects(batch)

            # Commit every 5 batches and at the end, to avoid huge transactions.
            if (i // batch_size) % 5 == 4 or i + batch_size >= total:
                self.commit()

            inserted += len(batch)

        return inserted

    def batch_insert_mappings(self, entity, mappings, batch_size=None):
        """
        Insert dict mappings in batches (more efficient than objects).

        Args:
            entity: Entity class.
            mappings: List of dicts.
            batch_size: Batch size; defaults to the instance setting.

        Returns:
            int: Number of records inserted.
        """
        batch_size = batch_size or self.batch_size
        total = len(mappings)
        inserted = 0

        for i in range(0, total, batch_size):
            batch = mappings[i:i + batch_size]
            self.bulk_insert_mappings(entity, batch)

            # Commit every 5 batches and at the end.
            if (i // batch_size) % 5 == 4 or i + batch_size >= total:
                self.commit()

            inserted += len(batch)

        return inserted

    def batch_update(self, entity, mappings, batch_size=None):
        """
        Update dict mappings in batches.

        Args:
            entity: Entity class.
            mappings: List of dicts (each must contain the primary key).
            batch_size: Batch size; defaults to the instance setting.

        Returns:
            int: Number of records updated.
        """
        batch_size = batch_size or self.batch_size
        total = len(mappings)
        updated = 0

        for i in range(0, total, batch_size):
            batch = mappings[i:i + batch_size]
            self.bulk_update_mappings(entity, batch)

            # Commit every 5 batches and at the end.
            if (i // batch_size) % 5 == 4 or i + batch_size >= total:
                self.commit()

            updated += len(batch)

        return updated

    def batch_update_by_ids(self, entity, id_field, status_map, status_field='status', batch_size=None):
        """
        Efficient bulk status update using a single CASE statement per batch.

        Args:
            entity: Entity class (its __table__.name is interpolated into SQL).
            id_field: Name of the ID column. NOTE(security): id_field,
                status_field and the table name are interpolated into the SQL
                text, not bound — they must never come from untrusted input.
                The ids and status values themselves are bound parameters.
            status_map: {id: status} mapping.
            status_field: Name of the status column.
            batch_size: Batch size; defaults to the instance setting.

        Returns:
            int: Number of records updated (sum of rowcounts).
        """
        if not status_map:
            return 0

        batch_size = batch_size or self.batch_size
        total = len(status_map)
        updated = 0
        id_list = list(status_map.keys())

        for i in range(0, total, batch_size):
            batch_ids = id_list[i:i + batch_size]
            batch_map = {k: status_map[k] for k in batch_ids}

            # Build one CASE arm per id, with bound parameters for both
            # the id and the new status value.
            cases = []
            params = {}
            for idx, (obj_id, status) in enumerate(batch_map.items()):
                param_name = f"status_{idx}"
                cases.append(f"WHEN {id_field} = :id_{idx} THEN :{param_name}")
                params[f"id_{idx}"] = obj_id
                params[param_name] = status

            case_statement = "CASE " + " ".join(cases) + " END"

            sql = f"""
                UPDATE {entity.__table__.name} 
                SET {status_field} = {case_statement}
                WHERE {id_field} IN ({','.join([f':id_{idx}' for idx in range(len(batch_ids))])})
            """

            # Fix: wrap the raw string in text(); passing a plain string to
            # Session.execute() is deprecated in SQLAlchemy 1.4 and an error
            # in 2.0.
            result = self.execute(text(sql), params)
            updated += result.rowcount

            # Commit every 5 batches and at the end.
            if (i // batch_size) % 5 == 4 or i + batch_size >= total:
                self.commit()

        return updated

    def stream_query(self, query, batch_size=None):
        """
        Stream a large result set in batches using LIMIT/OFFSET paging.

        NOTE(review): offset paging assumes a stable row order and that the
        matched set does not change while iterating; add an ORDER BY on a
        unique column for deterministic results — confirm with callers.

        Args:
            query: Query object.
            batch_size: Batch size; defaults to the instance setting.

        Yields:
            Lists of results, one list per batch.
        """
        batch_size = batch_size or self.batch_size
        offset = 0

        while True:
            batch = query.limit(batch_size).offset(offset).all()
            if not batch:
                break

            yield batch
            offset += len(batch)

    def process_in_batches(self, query, process_func, batch_size=None, serialize=False):
        """
        Apply process_func to every row of a query, batch by batch.

        Args:
            query: Query object.
            process_func: Callable invoked once per item.
            batch_size: Batch size; defaults to the instance setting.
            serialize: Serialize each batch first (only if serialization
                is enabled on the instance).

        Returns:
            int: Number of records processed.
        """
        batch_size = batch_size or self.batch_size
        total = 0

        for batch in self.stream_query(query, batch_size):
            if serialize and self.serialize_results:
                batch = self._get_serializer().serialize(batch)

            for item in batch:
                process_func(item)

            total += len(batch)
            logger.info(f"已处理 {total} 条记录")

        return total

    # ===== Convenience helpers =====
    def count(self, query):
        """Return the total number of rows the query matches."""
        return query.count()

    def exists(self, query):
        """Return True if the query matches at least one row."""
        return self.session.query(query.exists()).scalar()

    def get_by_id(self, entity, obj_id):
        """Fetch an object by primary key (serialized if enabled)."""
        obj = self.session.get(entity, obj_id)
        if self.serialize_results and obj:
            return self._get_serializer().serialize(obj)
        return obj

    def delete_by_id(self, entity, obj_id):
        """Delete an object by primary key.

        Returns:
            bool: True if a row was found and deleted, False otherwise.
        """
        obj = self.session.get(entity, obj_id)
        if obj:
            self.session.delete(obj)
            if self.auto_commit:
                self.commit()
            return True
        return False

    def delete_all(self, query):
        """Delete every row the query matches (no session synchronization).

        Returns:
            int: Number of rows deleted.
        """
        result = query.delete(synchronize_session=False)
        if self.auto_commit:
            self.commit()
        return result