import gzip
import logging
import logging.handlers
import os
import shutil
import threading
import time
from datetime import datetime, timedelta

class AdvancedLogger:
    """Thread-safe application logger with size-based rotation, automatic
    gzip archival of the error log, and cleanup of old archives.

    Records fan out to three sinks: a rotating main log file (DEBUG+),
    a rotating error-only file (ERROR+), and the console (INFO+).
    A daemon thread archives the error log whenever either the error-count
    threshold or the configured time interval is exceeded.
    """

    # Shared line layout / timestamp format for every handler.
    _LOG_FORMAT = '%(asctime)s | %(threadName)s | %(levelname)-8s | %(message)s'
    _DATE_FORMAT = '%Y-%m-%d %H:%M:%S'

    def __init__(self, name, log_dir="logs", max_file_size=10*1024*1024,
                 backup_count=5, archive_days=7, error_threshold=100):
        """
        Advanced logger.

        Args:
            name: logger name; also used as the log-file basename.
            log_dir: directory where log files are stored (created if missing).
            max_file_size: maximum size of a single log file, in bytes.
            backup_count: number of rotated backups kept per file.
            archive_days: error-log archive interval, in days.
            error_threshold: number of ERROR+ records that triggers archival.
        """
        self.name = name
        self.log_dir = log_dir
        self.max_file_size = max_file_size
        self.backup_count = backup_count
        self.archive_days = archive_days
        self.error_threshold = error_threshold
        self.error_count = 0                    # ERROR+ records since last archive
        self.last_archive_time = datetime.now()

        # Ensure the log directory exists.
        os.makedirs(log_dir, exist_ok=True)

        self.logger = logging.getLogger(name)
        self.logger.setLevel(logging.DEBUG)
        # logging.getLogger(name) returns a shared object: drop handlers left
        # over from a previous instance with the same name, otherwise every
        # record would be emitted once per instantiation.
        self.logger.handlers.clear()

        # Guards error_count / last_archive_time and handler swapping
        # during archival.
        self.lock = threading.Lock()

        formatter = self._make_formatter()

        # Main log: everything, rotated by size.
        file_handler = logging.handlers.RotatingFileHandler(
            os.path.join(log_dir, f"{name}.log"),
            maxBytes=max_file_size,
            backupCount=backup_count,
        )
        file_handler.setFormatter(formatter)
        file_handler.setLevel(logging.DEBUG)

        # Error log: ERROR and above only; recreated after each archival.
        self.error_handler = self._make_error_handler(formatter)

        self.logger.addHandler(file_handler)
        self.logger.addHandler(self.error_handler)

        # Console sink: INFO and above.
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(formatter)
        console_handler.setLevel(logging.INFO)
        self.logger.addHandler(console_handler)

        # Background archival monitor; daemon so it never blocks shutdown.
        self.archive_thread = threading.Thread(
            target=self._monitor_and_archive, daemon=True)
        self.archive_thread.start()

    def _make_formatter(self):
        """Build the shared log-line formatter."""
        return logging.Formatter(self._LOG_FORMAT, datefmt=self._DATE_FORMAT)

    def _make_error_handler(self, formatter):
        """Create a fresh rotating handler for the error-only log file."""
        handler = logging.handlers.RotatingFileHandler(
            os.path.join(self.log_dir, f"{self.name}_error.log"),
            maxBytes=self.max_file_size,
            backupCount=self.backup_count,
        )
        handler.setFormatter(formatter)
        handler.setLevel(logging.ERROR)
        return handler

    def log(self, level, message, *args, **kwargs):
        """Record *message* at *level*; counts ERROR+ records for archival."""
        with self.lock:
            if level >= logging.ERROR:
                self.error_count += 1
            self.logger.log(level, message, *args, **kwargs)

    def debug(self, message, *args, **kwargs):
        self.log(logging.DEBUG, message, *args, **kwargs)

    def info(self, message, *args, **kwargs):
        self.log(logging.INFO, message, *args, **kwargs)

    def warning(self, message, *args, **kwargs):
        self.log(logging.WARNING, message, *args, **kwargs)

    def error(self, message, *args, **kwargs):
        self.log(logging.ERROR, message, *args, **kwargs)

    def critical(self, message, *args, **kwargs):
        self.log(logging.CRITICAL, message, *args, **kwargs)

    def _monitor_and_archive(self):
        """Background loop: archive the error log when either the error-count
        threshold or the time interval has been exceeded."""
        while True:
            try:
                # Snapshot the counter under the lock (written by log()).
                with self.lock:
                    count = self.error_count
                interval_due = (datetime.now() - self.last_archive_time) \
                    > timedelta(days=self.archive_days)
                if count >= self.error_threshold or interval_due:
                    # Counter/timestamp reset happens inside the archive
                    # call, under the same lock as log().
                    self._archive_error_logs()

                # Check once per hour.
                time.sleep(3600)

            except Exception as e:
                # Never let the monitor thread die; retry after a minute.
                self.logger.error(f"归档线程错误: {e}")
                time.sleep(60)

    def _archive_error_logs(self):
        """Compress the current error log into a timestamped .gz file and
        start a fresh error log; resets the archival bookkeeping."""
        with self.lock:
            try:
                # Detach and close the handler so the file can be removed.
                self.logger.removeHandler(self.error_handler)
                self.error_handler.close()

                base_name = os.path.join(self.log_dir, f"{self.name}_error")
                current_file = f"{base_name}.log"
                archive_file = f"{base_name}_{datetime.now().strftime('%Y%m%d_%H%M%S')}.gz"

                if os.path.exists(current_file) and os.path.getsize(current_file) > 0:
                    # Compress the log file (binary-safe chunked copy).
                    with open(current_file, 'rb') as f_in, \
                            gzip.open(archive_file, 'wb') as f_out:
                        shutil.copyfileobj(f_in, f_out)

                    # Remove the original once the archive is written.
                    os.remove(current_file)

                # Fresh handler for subsequent error records.
                self.error_handler = self._make_error_handler(self._make_formatter())
                self.logger.addHandler(self.error_handler)

                # Reset bookkeeping under the lock so concurrent log()
                # calls cannot race the counter.
                self.error_count = 0
                self.last_archive_time = datetime.now()

                self.logger.info(f"错误日志已归档: {archive_file}")

            except Exception as e:
                # Best-effort: archival failure must not break logging.
                self.logger.error(f"归档错误日志失败: {e}")

    def cleanup_old_archives(self, max_age_days=30):
        """Delete gzipped error-log archives older than *max_age_days* days."""
        with self.lock:
            try:
                cutoff_time = datetime.now() - timedelta(days=max_age_days)

                for filename in os.listdir(self.log_dir):
                    if not (filename.startswith(f"{self.name}_error_")
                            and filename.endswith(".gz")):
                        continue
                    file_path = os.path.join(self.log_dir, filename)
                    # mtime is the portable "age" measure; getctime is
                    # inode-change time on POSIX, not creation time.
                    file_time = datetime.fromtimestamp(os.path.getmtime(file_path))

                    if file_time < cutoff_time:
                        os.remove(file_path)
                        self.logger.info(f"已清理旧归档文件: {filename}")

            except Exception as e:
                self.logger.error(f"清理旧归档文件失败: {e}")

# Usage example
if __name__ == "__main__":
    # Build a logger with small files and an aggressive archive policy
    # so the demo exercises rotation/archival quickly.
    logger = AdvancedLogger(
        name="my_app",
        log_dir="app_logs",
        max_file_size=5*1024*1024,  # 5 MB per file
        backup_count=10,
        archive_days=1,
        error_threshold=50
    )

    def worker_thread(thread_id):
        # Emit 100 records: every 10th an error, every 5th a warning,
        # the rest plain info.
        for i in range(100):
            if i % 10 == 0:
                logger.error(f"线程 {thread_id} - 错误 #{i}")
            elif i % 5 == 0:
                logger.warning(f"线程 {thread_id} - 警告 #{i}")
            else:
                logger.info(f"线程 {thread_id} - 信息 #{i}")
            time.sleep(0.01)

    # Fan out five workers, then wait for them all to finish.
    threads = [threading.Thread(target=worker_thread, args=(i,)) for i in range(5)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()

    # Force an archive pass (normally the background thread does this),
    # then prune archives older than a week.
    logger._archive_error_logs()
    logger.cleanup_old_archives(max_age_days=7)
