import os
import time
from contextvars import ContextVar
from typing import Optional
from uuid import uuid4

from loguru import logger
from sqlalchemy import event
from sqlalchemy.engine import Engine

# Context-local request ID used for end-to-end (full-chain) log tracing.
_x_request_id: ContextVar[str] = ContextVar("x_request_id", default="")

class TraceID:
    """Full-chain log tracing via a context-local request ID."""

    @staticmethod
    def set(req_id: Optional[str] = None) -> str:
        """Store a request ID in the current context.

        :param req_id: explicit request ID; when None or empty a random
            ``uuid4().hex`` (32 hex chars) is generated.
        :return: the request ID that was stored (not the ContextVar).
        """
        if not req_id:
            req_id = uuid4().hex
        _x_request_id.set(req_id)
        return req_id

    @staticmethod
    def get() -> str:
        """Return the current request ID, or ``"-"`` when unset."""
        return _x_request_id.get() or "-"

# Log locations — API and task logs are kept in separate files.
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "../.."))
LOG_DIR = os.path.join(BASE_DIR, "logs")
# Make sure the directory exists before any sink is attached.
os.makedirs(LOG_DIR, exist_ok=True)

# One log file per mode: logs/<mode>.log
LOG_PATHS = {mode: os.path.join(LOG_DIR, f"{mode}.log") for mode in ("api", "task", "test")}

API_LOG_FILE = LOG_PATHS["api"]
TASK_LOG_FILE = LOG_PATHS["task"]
TEST_LOG_FILE = LOG_PATHS["test"]

# Shared record filter used by every sink.
def _logger_filter(record):
    """Stamp the current request ID onto the record (for ``{request_id}``
    in the format string) and let the record through."""
    record["request_id"] = TraceID.get()
    return True

# Line layout: timestamp | level | request id | origin - message.
log_format = (
    "{time:YYYY-MM-DD HH:mm:ss.SSS} | {level: ^6} | {request_id} | "
    "{name}:{function}:{line} - {message}"
)

# Keyword arguments shared by every ``logger.add`` call.
params = dict(
    rotation="10 MB",       # rotate once a file reaches 10 MB
    encoding="utf-8",
    retention="10 days",    # delete rotated files after 10 days
    enqueue=True,           # write via a background queue (async-safe)
    backtrace=True,         # extended tracebacks on exceptions
    filter=_logger_filter,  # stamps each record with the request id
    format=log_format,
)

# Cache of per-mode loggers already initialised.
_loggers = {}
# Whether loguru's default stderr handler has been removed yet.
_default_handler_removed = False

def init_logger(mode: str = "api"):
    """Initialise (once) and return a logger bound to *mode* ("api" or "task").

    loguru keeps a single global core shared by every ``logger.bind()``
    result, so each mode gets its own sink guarded by a filter on the
    bound ``log_mode`` extra. Without that, ``remove()`` on a second
    init would wipe the first mode's sink and every sink would receive
    every record, defeating the api/task separation.
    """
    global _default_handler_removed
    if mode in _loggers:
        return _loggers[mode]

    log_file = LOG_PATHS.get(mode, LOG_PATHS["api"])
    # Drop the default console handler only once — removing on every
    # call would also drop sinks added for previously initialised modes.
    if not _default_handler_removed:
        logger.remove()
        _default_handler_removed = True

    def _mode_filter(record, _mode=mode):
        # Route only records bound to this mode (records logged through
        # the bare `logger`, with no log_mode extra, default to "api")
        # and stamp the request id on those that pass.
        return record["extra"].get("log_mode", "api") == _mode and _logger_filter(record)

    sink_kwargs = {**params, "filter": _mode_filter}
    logger.add(log_file, level="INFO", **sink_kwargs)

    custom_logger = logger.bind(log_mode=mode)
    custom_logger.info(f"Logger initialized for {mode} mode, logging to {log_file}")
    _loggers[mode] = custom_logger
    return custom_logger

# SQLAlchemy log integration (SQL statements go to the same mode file).
def attach_sql_logger(engine: Engine, mode: str = "api"):
    """
    Attach SQL logging (statement, execution time, row count) to a
    SQLAlchemy engine. Handles both sync and async engines.

    :param engine: a sync ``Engine`` or an async engine wrapping one
    :param mode: which mode logger/sink to use ("api" or "task")
    """
    log = _loggers.get(mode) or init_logger(mode)
    # AsyncEngine wraps a sync engine; event listeners must be attached
    # to the underlying sync engine.
    _engine = engine.sync_engine if hasattr(engine, "sync_engine") else engine
    log.info(f"SQLAlchemy logger attached to engine {_engine!r} for {mode} mode")

    @event.listens_for(_engine, "before_cursor_execute")
    def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
        """Record the start time on the execution context and log the
        statement and its parameters before execution."""
        context._query_start_time = time.perf_counter()
        log.info(f"SQL EXEC: {statement.strip()} | Params: {parameters}")

    @event.listens_for(_engine, "after_cursor_execute")
    def after_cursor_execute(conn, cursor, statement, parameters, context, executemany):
        """Log elapsed time (ms) and the cursor's affected row count
        after execution."""
        query_time = (time.perf_counter() - context._query_start_time) * 1000
        rowcount = cursor.rowcount
        log.info(f"SQL DONE: Time: {query_time:.2f}ms | Rows: {rowcount}")

    @event.listens_for(_engine, "handle_error")
    def handle_error(exception_context):
        """Log the failing statement, its parameters and the original
        exception.

        :param exception_context: SQLAlchemy ``ExceptionContext``;
            ``.statement`` may be None for connection-level errors, so
            guard before calling ``.strip()``.
        """
        stmt = exception_context.statement
        stmt_text = stmt.strip() if stmt else "<no statement>"
        # Local name avoids shadowing the module-level `params` dict.
        stmt_params = exception_context.parameters
        log.error(
            f"SQL ERROR: {stmt_text} | Params: {stmt_params} "
            f"| Error: {exception_context.original_exception}"
        )

__all__ = ["logger", "TraceID", "attach_sql_logger"]
