from loguru import logger
import logging
import time
import os, sys
from fastapi import Request
import asyncio
import orjson

from config.base import get_settings

# Cached application settings (log paths, environment name, etc.)
settings = get_settings()

# Custom handler that intercepts stdlib `logging` records and forwards them to loguru
class InterceptHandler(logging.Handler):
    """Bridge standard-library ``logging`` records into loguru.

    Installed as the root handler (see ``init_log``) so that third-party
    libraries that log via the stdlib ``logging`` module end up in the
    loguru sinks configured for this application.
    """

    def emit(self, record):
        """Forward a stdlib ``LogRecord`` to loguru at the matching level."""
        # Map the stdlib level name to a loguru level; fall back to the
        # numeric level for custom levels loguru does not know about.
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            level = record.levelno

        # Walk up the call stack past the logging machinery so loguru
        # reports the real caller's module/function/line.  Guard against
        # f_back returning None to avoid an AttributeError at the top of
        # the stack (per the official loguru interception recipe).
        frame, depth = logging.currentframe(), 2
        while frame is not None and frame.f_code.co_filename == logging.__file__:
            frame = frame.f_back
            depth += 1

        logger.patch(patching).opt(depth=depth, exception=record.exc_info).log(
            level, record.getMessage()
        )
        
def serialize(record):
    """Return a compact JSON string holding the record's time, level and message."""
    fields = dict(
        logtime=record["time"].isoformat(),
        level=record["level"].name,
        message=record["message"],
    )
    return str(orjson.dumps(fields), "utf-8")

def patching(record):
    """Loguru patcher: attach a JSON rendering of the record as extra['serialized']."""
    record["extra"].update(serialized=serialize(record))

async def create_directories():
    """Ensure the info and error log directories exist.

    Directory creation is pushed to a worker thread so the event loop is
    not blocked by filesystem calls.
    """
    info_dir = os.path.join(settings.log.PATH, settings.log.INFO_DIR)
    # BUG FIX: this previously reused settings.log.INFO_DIR, so the error
    # log directory (used by init_log's error sink) was never created.
    error_dir = os.path.join(settings.log.PATH, settings.log.ERROR_DIR)

    await asyncio.to_thread(os.makedirs, info_dir, exist_ok=True)
    await asyncio.to_thread(os.makedirs, error_dir, exist_ok=True)

async def init_log():
    """Configure loguru sinks (console, info file, error file) and route the
    stdlib ``logging`` module through loguru.

    Must be awaited once at application startup, before any logging occurs.
    """
    # Make sure the target directories exist before attaching file sinks.
    await create_directories()

    # Log verbosely in test/dev environments, INFO and above elsewhere.
    verbose = settings.app.ENV in ['test', 'dev']
    console_level = "DEBUG" if verbose else "INFO"
    file_info_level = "DEBUG" if verbose else "INFO"

    # Shared line format for all sinks (was duplicated three times).
    log_format = "{time} [ThreadID:{thread}] [{level}] {name} {function}:{line} - {message}"

    info_file = os.path.join(
        settings.log.PATH,
        settings.log.INFO_DIR,
        settings.log.INFO_FILE
    )

    error_file = os.path.join(
        settings.log.PATH,
        settings.log.ERROR_DIR,
        settings.log.ERROR_FILE
    )

    # Remove loguru's default stderr handler; without this every console
    # message is emitted twice (default stderr sink + the stdout sink below).
    logger.remove()

    # Console sink.
    logger.add(
        sink=sys.stdout,
        level=console_level,
        format=log_format,
        colorize=True,
        enqueue=True,  # thread-/process-safe queued writes
        backtrace=False,
        diagnose=False
    )

    # Hoisted out of the filter lambdas: previously logger.level("ERROR")
    # was looked up on every single record.
    error_no = logger.level("ERROR").no

    # Error file sink: ERROR and above, rotated at midnight, kept 15 days.
    logger.add(
        sink=error_file,
        level="ERROR",
        format=log_format,
        rotation="00:00",
        retention="15 days",
        backtrace=False,
        diagnose=False,
        enqueue=True,
        filter=lambda record: record["level"].no >= error_no
    )

    # Info file sink: everything below ERROR (errors go to the error file only).
    logger.add(
        sink=info_file,
        level=file_info_level,
        format=log_format,
        rotation="00:00",
        retention="15 days",
        enqueue=True,
        filter=lambda record: record["level"].no < error_no
    )

    # Quiet chatty third-party libraries.  (The redundant local
    # `import logging` was removed; the module-level import is in scope.)
    for noisy in ("requests", "urllib3", "tortoise", "asyncio", "docker", "multipart"):
        logging.getLogger(noisy).setLevel(logging.WARNING)

    # Route everything emitted via the stdlib logging module through loguru.
    logging.root.handlers = [InterceptHandler()]
    logging.root.setLevel(logging.DEBUG if verbose else logging.INFO)
    
# Module-level logger whose records carry the JSON 'serialized' extra field.
log = logger.patch(patching)