"""
数据库模块 - 基础数据映射器 (BaseMapper)

提供一个功能丰富、完全异步的通用数据访问层，封装了绝大部分常见的 CRUD 操作。
其 API 设计深受您提供的 BaseRepository 启发。
"""

import abc
import logging
from contextlib import asynccontextmanager
from datetime import datetime
from typing import Any, AsyncGenerator, Dict, Generic, List, Optional, Tuple, Type, TypeVar, get_args, Union

from sqlalchemy import delete as sqlalchemy_delete
from sqlalchemy import func, select, text, update, inspect
from sqlalchemy.dialects.mysql import insert as mysql_insert
from sqlalchemy.dialects.postgresql import insert as postgresql_insert
from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
from sqlalchemy.sql.expression import ColumnElement

from axiom_boot.core.id_generator import IdGenerator
from axiom_boot.database.base_model import BaseModel
from axiom_boot.database.transaction import _current_session
from axiom_boot.di import autowired
from axiom_boot.api.models import PaginationParams, FilterParams, PaginatedResponse


T = TypeVar("T", bound=BaseModel)
logger = logging.getLogger(__name__)


class BaseMapper(Generic[T], metaclass=abc.ABCMeta):
    """
    一个异步的、通用的数据映射器基类，封装了丰富的 CRUD 功能。

    此类设计为可由 DI 容器管理的组件。它通过属性注入自动获取
    `session_factory` 和 `id_generator`，并使用异步上下文管理器来
    确保每个数据库操作都在一个独立的、生命周期安全的会话中执行。
    它还能自动从子类的泛型定义中推断出要操作的模型实体。
    """
    db_session_factory: async_sessionmaker[AsyncSession] = autowired("async_sessionmaker")
    id_generator: IdGenerator = autowired()

    def __init__(self):
        """
        Initialize the base mapper.

        Resolves the generic type parameter ``T`` from the subclass
        definition to determine which SQLAlchemy model this mapper
        operates on.  For example, ``class UserMapper(BaseMapper[User])``
        sets ``self.model`` to ``User``.

        Raises:
            TypeError: if the model type cannot be inferred from the
                subclass's generic bases.
        """
        # Scan every original (generic) base instead of blindly taking the
        # first one, so subclasses that mix in other bases still resolve T.
        # Also require the resolved argument to be an actual class, not a
        # still-unbound TypeVar.
        model_type: Optional[Type[T]] = None
        for base in getattr(self.__class__, "__orig_bases__", ()):
            args = get_args(base)
            if args and isinstance(args[0], type):
                model_type = args[0]
                break
        if model_type is None:
            raise TypeError(
                f"无法从 {self.__class__.__name__} 的泛型基类中推断出模型类型。"
                "请确保您的 Mapper 类继承自 BaseMapper 并指定了模型，例如："
                f"class {self.__class__.__name__}(BaseMapper[YourModel]): pass"
            )
        self.model: Type[T] = model_type
        # Cache the primary-key column name once; several CRUD helpers use it.
        self._pk_name = self._get_primary_key_name()

    def _get_primary_key_name(self) -> str:
        """Resolve the model's primary-key column name via SQLAlchemy inspection."""
        primary_key = inspect(self.model).primary_key
        if primary_key:
            # Composite keys are not supported; the first PK column wins.
            return primary_key[0].name
        raise TypeError(f"模型 {self.model.__name__} 没有定义主键。")

    @asynccontextmanager
    async def _session_scope(self) -> AsyncGenerator[AsyncSession, None]:
        """
        Async context manager that yields a database session.

        If an outer transaction (managed by ``@transactional``) is active,
        its session is reused and its lifecycle is left untouched.
        Otherwise a fresh session is created and the work runs inside an
        explicit transaction that commits on success and rolls back on error.
        """
        # Detect an active outer transaction bound to the current context.
        session = _current_session.get()
        if session:
            # The outer @transactional owns commit/rollback; just reuse it.
            yield session
        else:
            async with self.db_session_factory() as new_session:
                # BUGFIX: a bare `async with session` only closes the session
                # and rolls back pending changes -- it never commits.  Wrap
                # the single operation in an explicit transaction so
                # standalone writes (save/update/delete without
                # @transactional) are actually persisted.
                async with new_session.begin():
                    yield new_session

    def _get_dialect_name(self, session: AsyncSession) -> str:
        """Return the dialect name (e.g. 'mysql', 'postgresql') of the session's bind."""
        bind = session.bind
        return bind.dialect.name

    async def _set_create_fields(self, model: T) -> None:
        """
        Populate bookkeeping fields on a record about to be inserted.

        Generates a primary key when one is missing, stamps the creation
        and update times, and clears the soft-delete flag.
        """
        # Honor the model's actual primary-key column instead of assuming 'id'
        # (backward compatible: for models whose PK is 'id' this is identical).
        if not getattr(model, self._pk_name, None):
            setattr(model, self._pk_name, await self.id_generator.generate_id())
        # Use a single timestamp so create_time == update_time on fresh rows
        # (two separate datetime.now() calls could differ by microseconds).
        now = datetime.now()
        model.create_time = now
        model.update_time = now
        model.is_deleted = 0

    def _prepare_update_dict(self, values: Dict[str, Any]) -> Dict[str, Any]:
        """
        Return a copy of *values* with ``update_time`` stamped to now.

        BUGFIX: a copy is returned instead of mutating the argument, so a
        caller's dictionary is never modified as a hidden side effect.
        """
        prepared = dict(values)
        prepared["update_time"] = datetime.now()
        return prepared

    # Maps a ``field__<op>`` suffix to a SQLAlchemy condition builder.
    # Defined once at class level -- the original rebuilt this dict for
    # every filter key on every call.
    _FILTER_OPS: Dict[str, Any] = {
        'eq': lambda c, v: c == v,
        'ne': lambda c, v: c != v,
        'lt': lambda c, v: c < v,
        'lte': lambda c, v: c <= v,
        'gt': lambda c, v: c > v,
        'gte': lambda c, v: c >= v,
        'in': lambda c, v: c.in_(v),
        'notin': lambda c, v: c.notin_(v),
        'like': lambda c, v: c.like(v),
        'ilike': lambda c, v: c.ilike(v),
        'is': lambda c, v: c.is_(v),
        'isnot': lambda c, v: c.isnot(v),
    }

    def _apply_filters_to_statement(self, stmt: Any, filters: Dict[str, Any], include_deleted: bool = False) -> Any:
        """
        Apply ``field`` / ``field__op`` filters to a select/update/delete statement.

        Unless *include_deleted* is True, an ``is_deleted == 0`` condition is
        always added (overriding any caller-supplied ``is_deleted`` filter).
        Keys referencing unknown model attributes or unknown operator
        suffixes are silently ignored, preserving the original lenient
        behaviour.
        """
        effective_filters = dict(filters)
        if not include_deleted:
            # Soft-deleted rows are excluded by default.  Callers passing
            # include_deleted=True keep full control, including any explicit
            # is_deleted filter they supplied themselves.
            effective_filters['is_deleted'] = 0

        conditions: List[ColumnElement[bool]] = []
        for key, value in effective_filters.items():
            field_name, *op_parts = key.split('__')
            operator_suffix = op_parts[0] if op_parts else 'eq'
            if not hasattr(self.model, field_name):
                continue
            builder = self._FILTER_OPS.get(operator_suffix)
            if builder is not None:
                conditions.append(builder(getattr(self.model, field_name), value))

        if conditions:
            stmt = stmt.where(*conditions)
        return stmt

    # --- 查询方法 ---

    async def find_by_pk(self, pk: Union[int, str], include_deleted: bool = False) -> Optional[T]:
        """Fetch a single record by primary key, or None when not found."""
        pk_column = getattr(self.model, self._pk_name)
        conditions = [pk_column == pk]
        if not include_deleted:
            # Hide soft-deleted rows unless explicitly requested.
            conditions.append(self.model.is_deleted == 0)
        stmt = select(self.model).where(*conditions)
        async with self._session_scope() as session:
            return (await session.execute(stmt)).scalar_one_or_none()

    async def find_one_by_filters(self, include_deleted: bool = False, **filters) -> Optional[T]:
        """Fetch at most one record matching the given keyword filters."""
        stmt = self._apply_filters_to_statement(
            select(self.model), filters, include_deleted=include_deleted
        )
        async with self._session_scope() as session:
            return (await session.execute(stmt)).scalar_one_or_none()
    
    async def find_by_filters(self, include_deleted: bool = False, **filters) -> List[T]:
        """Fetch every record matching the given keyword filters."""
        stmt = self._apply_filters_to_statement(
            select(self.model), filters, include_deleted=include_deleted
        )
        async with self._session_scope() as session:
            rows = (await session.execute(stmt)).scalars().all()
        return list(rows)

    # --- 计数 ---

    async def count(self, include_deleted: bool = False, **filters) -> int:
        """
        Count records matching the given keyword filters.

        Returns:
            The number of matching rows (0 when none match).
        """
        # BUGFIX: anchor the count to the model's table explicitly.  Without
        # select_from(), `select(func.count())` has no FROM clause whenever
        # the filters yield no WHERE conditions (e.g. include_deleted=True
        # and no filters), producing a wrong result or a dialect error.
        stmt = self._apply_filters_to_statement(
            select(func.count()).select_from(self.model),
            filters,
            include_deleted=include_deleted,
        )
        async with self._session_scope() as session:
            result = await session.execute(stmt)
            total = result.scalar_one_or_none()
            # Guarantee an int even if the driver returns no row.
            return total if total is not None else 0

    # --- 分页查询 ---

    async def page(
        self,
        pagination: PaginationParams,
        filters: Optional[Dict[str, Any]] = None,
        order_by: Optional[List[str]] = None,
        include_deleted: bool = False
    ) -> PaginatedResponse[T]:
        """
        Generic async pagination query.

        Counts the matching rows first; when the total is zero the data
        query is skipped entirely.  *order_by* entries look like
        ``"name"`` or ``"create_time desc"``; unknown columns are ignored.

        Returns:
            A ``PaginatedResponse`` carrying the page of items and the total.
        """
        filter_dict = filters or {}
        total = await self.count(include_deleted=include_deleted, **filter_dict)
        if not total:
            # Short-circuit: no rows match, skip the data query.
            return PaginatedResponse.create(
                items=[], total=0, page=pagination.page, page_size=pagination.page_size
            )

        stmt = self._apply_filters_to_statement(
            select(self.model), filter_dict, include_deleted=include_deleted
        )

        for entry in order_by or []:
            column_name, *rest = entry.strip().split()
            if not hasattr(self.model, column_name):
                continue
            column = getattr(self.model, column_name)
            descending = bool(rest) and rest[0].lower() == 'desc'
            stmt = stmt.order_by(column.desc() if descending else column.asc())

        offset = (pagination.page - 1) * pagination.page_size
        stmt = stmt.offset(offset).limit(pagination.page_size)

        async with self._session_scope() as session:
            rows = (await session.execute(stmt)).scalars().all()

        return PaginatedResponse.create(
            items=list(rows),
            total=total,
            page=pagination.page,
            page_size=pagination.page_size,
        )

    # --- 数据操作 ---
    
    async def save(self, model: T) -> T:
        """Persist a new record and return it with DB-generated state refreshed."""
        await self._set_create_fields(model)
        async with self._session_scope() as session:
            session.add(model)
            # Flush pushes the pending INSERT so the primary key is
            # populated; refresh then syncs the Python object with the row.
            await session.flush()
            await session.refresh(model)
            return model

    async def update_by_pk(self, pk: Union[int, str], values: Dict[str, Any]) -> Optional[T]:
        """
        Update a record by primary key.

        Honours each column's ``update_strategy`` metadata:

        * ``'strict'``      -- the value is always written, even if None.
        * ``'ignore_none'`` -- (default) None values are skipped, enabling
          partial updates from Pydantic ``model_dump(exclude_unset=True)``.

        Args:
            pk: Primary key of the record to update.
            values: Mapping of field name to new value.

        Returns:
            The updated entity, or None when the record does not exist.
        """
        entity_pk = self._coerce_pk(pk)
        final_values = self._filter_updatable_values(values)

        async with self._session_scope() as session:
            if not final_values:
                # Nothing survives the update strategies; just return the row.
                return await self._find_by_pk_in_session(entity_pk, session)

            stmt = (
                update(self.model)
                .where(getattr(self.model, self._pk_name) == entity_pk)
                .values(self._prepare_update_dict(final_values))
            )
            self._log_statement(session, stmt, "UPDATE")
            await session.execute(stmt)
            # Re-read so callers receive the post-update state.
            return await self._find_by_pk_in_session(entity_pk, session)

    def _coerce_pk(self, pk: Union[int, str]) -> Union[int, str]:
        """
        Best-effort conversion of *pk* to int for driver compatibility.

        BUGFIX: the previous unconditional ``int(str(pk).strip())`` raised
        ValueError for models with natural string primary keys even though
        the public signatures accept ``Union[int, str]``.  Non-numeric keys
        are now passed through unchanged.
        """
        try:
            return int(str(pk).strip())
        except (TypeError, ValueError):
            return pk

    def _filter_updatable_values(self, values: Dict[str, Any]) -> Dict[str, Any]:
        """Apply each column's 'update_strategy' metadata to the raw values."""
        final_values: Dict[str, Any] = {}
        for key, value in values.items():
            if not hasattr(self.model, key):
                continue
            column = getattr(self.model, key).expression
            strategy = column.info.get('update_strategy', 'ignore_none')
            if strategy == 'strict' or (strategy == 'ignore_none' and value is not None):
                final_values[key] = value
        return final_values

    def _log_statement(self, session: AsyncSession, stmt: Any, label: str) -> None:
        """
        Debug-log a compiled statement with safe fallbacks.

        Literal-bind compilation is most readable but can fail for complex
        types (e.g. JSON); fall back to the parameterized form, and finally
        to an error log.  The dialect is resolved up front so the fallback
        branch can never hit an unbound name (a latent bug in the original).
        """
        try:
            dialect = session.bind.dialect
        except Exception as e:
            logger.error(f"Could not resolve dialect for statement logging: {e}")
            return
        try:
            compiled_stmt = stmt.compile(dialect=dialect, compile_kwargs={"literal_binds": True})
            logger.debug(f"Executing compiled {label} statement: {compiled_stmt}")
        except Exception:
            try:
                compiled_stmt = stmt.compile(dialect=dialect, compile_kwargs={"literal_binds": False})
                logger.debug(f"Executing {label} statement (fallback log): {compiled_stmt.string} PARAMS: {compiled_stmt.params}")
            except Exception as e:
                logger.error(f"Could not compile {label} statement for logging AT ALL: {e}")

    async def delete_by_pk(self, pk: Union[int, str], hard_delete: bool = False) -> bool:
        """
        Delete a record by primary key (soft delete by default).

        Args:
            pk: Primary key of the record.
            hard_delete: True issues a physical DELETE; False flags the row
                via ``is_deleted``.

        Returns:
            True when at least one row was affected.
        """
        # Best-effort int coercion for driver compatibility.  BUGFIX: the
        # old unconditional int() raised for models with natural string
        # primary keys despite the Union[int, str] signature.
        try:
            entity_pk: Union[int, str] = int(str(pk).strip())
        except (TypeError, ValueError):
            entity_pk = pk

        async with self._session_scope() as session:
            pk_condition = getattr(self.model, self._pk_name) == entity_pk
            if hard_delete:
                stmt = sqlalchemy_delete(self.model).where(pk_condition)
            else:
                stmt = update(self.model).where(pk_condition).values({
                    "is_deleted": 1,
                    "update_time": datetime.now()
                })

            # Debug logging with safe fallbacks: literal binds read best but
            # may fail on complex types (e.g. JSON).  Resolve the dialect
            # once up front so the fallback branch cannot hit an unbound
            # name (latent bug in the original).
            dialect = session.bind.dialect
            try:
                compiled_stmt = stmt.compile(dialect=dialect, compile_kwargs={"literal_binds": True})
                logger.debug(f"Executing compiled DELETE/UPDATE statement: {compiled_stmt}")
            except Exception:
                try:
                    compiled_stmt = stmt.compile(dialect=dialect, compile_kwargs={"literal_binds": False})
                    logger.debug(f"Executing DELETE/UPDATE statement (fallback log): {compiled_stmt.string} PARAMS: {compiled_stmt.params}")
                except Exception as e:
                    logger.error(f"Could not compile DELETE statement for logging AT ALL: {e}")

            result = await session.execute(stmt)
            logger.debug(f"Statement executed. Result rowcount: {result.rowcount}")
            return result.rowcount > 0

    async def soft_delete_by_pks(self, pks: List[Union[int, str]]) -> bool:
        """Soft-delete every record whose primary key appears in *pks*."""
        if not pks:
            # An empty batch is treated as success by convention.
            return True

        stmt = (
            update(self.model)
            .where(getattr(self.model, self._pk_name).in_(pks))
            .values({"is_deleted": 1, "update_time": datetime.now()})
        )
        async with self._session_scope() as session:
            result = await session.execute(stmt)
            return result.rowcount > 0

    async def delete_by_pks(self, pks: List[Union[int, str]], hard_delete: bool = False) -> int:
        """
        Bulk-delete records by a list of primary keys.

        Args:
            pks: Primary keys of the records to delete.
            hard_delete: True issues a physical DELETE; otherwise rows are
                soft-deleted via the ``is_deleted`` flag.

        Returns:
            The number of affected rows (0 for an empty *pks*).
        """
        if not pks:
            return 0

        async with self._session_scope() as session:
            pk_in_list = getattr(self.model, self._pk_name).in_(pks)
            if hard_delete:
                stmt = sqlalchemy_delete(self.model).where(pk_in_list)
            else:
                stmt = update(self.model).where(pk_in_list).values({
                    "is_deleted": 1,
                    "update_time": datetime.now(),
                })

            result = await session.execute(stmt)
            return result.rowcount

    async def _find_by_pk_in_session(self, pk: Union[int, str], session: AsyncSession) -> Optional[T]:
        """Look up a single entity by primary key within the given session.

        ``session.get`` natively handles both str and int primary keys, so
        the annotation accepts either (the previous ``int`` annotation was
        narrower than the actual contract).
        """
        return await session.get(self.model, pk)

    async def batch_save(self, models: List[T]) -> bool:
        """Persist a batch of new records; an empty batch is a no-op success."""
        if not models:
            return True

        for entity in models:
            await self._set_create_fields(entity)

        async with self._session_scope() as session:
            session.add_all(models)
            # Flush pushes the pending INSERTs within the current scope.
            await session.flush()
        return True

    async def batch_upsert(self, models: List[Dict[str, Any]], unique_index_elements: List[str]) -> bool:
        """
        Bulk upsert (insert-or-update), dialect-aware for MySQL and PostgreSQL.

        Args:
            models: Row dicts to insert or update.
            unique_index_elements: Columns forming the unique/conflict index.

        Returns:
            True on success (including the empty-input no-op).

        Raises:
            NotImplementedError: for unsupported database dialects.
        """
        if not models:
            return True

        # Columns that must never be overwritten on conflict.  BUGFIX: this
        # class's models use 'create_time' (see _set_create_fields), but the
        # original only excluded 'created_at', so the creation timestamp got
        # clobbered on every upsert conflict.  Both names are excluded for
        # safety with either schema convention.
        protected = set(unique_index_elements) | {"id", "create_time", "created_at"}

        async with self._session_scope() as session:
            dialect_name = self._get_dialect_name(session)
            if dialect_name == "mysql":
                stmt = mysql_insert(self.model).values(models)
                update_cols = {c.name: c for c in stmt.inserted if c.name not in protected}
                stmt = stmt.on_duplicate_key_update(**update_cols)
            elif dialect_name == "postgresql":
                stmt = postgresql_insert(self.model).values(models)
                update_cols = {
                    col: getattr(stmt.excluded, col)
                    for col in models[0].keys()
                    if col not in protected
                }
                stmt = stmt.on_conflict_do_update(index_elements=unique_index_elements, set_=update_cols)
            else:
                raise NotImplementedError(f"Batch upsert is not implemented for dialect '{dialect_name}'")

            await session.execute(stmt)
            await session.flush()
        return True