import pymysql
import logging
import logging.handlers
from pathlib import Path

from threading import Lock, Thread
import time
import os
import gzip
from datetime import datetime, timedelta
from queue import Queue, Empty
import socket
TYPETYPE='豆瓣'


class MySQLHandler(logging.Handler):
    """
    Thread-safe MySQL log handler with batched background inserts,
    automatic table creation and periodic archiving.

    ``emit`` only enqueues the record; a daemon thread drains the queue and
    inserts rows in batches.  A second daemon thread periodically moves rows
    older than ``archive_days`` into ``archive_table`` once the main table
    holds more than ``max_records`` rows.
    """

    # Truncation limits matching the column definitions created below.
    _LEVEL_MAX = 10       # varchar(10)
    _MODULE_MAX = 100     # varchar(100)
    _THREAD_MAX = 50      # varchar(50)
    _PROCESS_MAX = 50     # varchar(50)
    _TYPE_MAX = 50        # varchar(50)
    _TEXT_MAX = 65535     # MySQL TEXT maximum length

    def __init__(self, host, user, password, database, table_name='app_logs',
                 archive_table='app_logs_archive', port=3306, charset='utf8mb4',
                 max_records=10000, archive_days=30, archive_check_hours=24,
                 auto_create_tables=True):
        """
        Initialize the MySQL log handler.

        :param host: MySQL host address
        :param user: user name
        :param password: password
        :param database: database name
        :param table_name: main log table name
        :param archive_table: archive log table name
        :param port: port number, default 3306
        :param charset: character set, default utf8mb4
        :param max_records: maximum rows in the main table before archiving
        :param archive_days: archive rows older than this many days
        :param archive_check_hours: interval (hours) between archive checks
        :param auto_create_tables: create the tables if they do not exist
        """
        super().__init__()
        self.db_config = {
            'host': host,
            'user': user,
            'password': password,
            'db': database,
            'port': port,
            'charset': charset,
            'cursorclass': pymysql.cursors.DictCursor
        }
        self.table_name = table_name
        self.archive_table = archive_table
        self.max_records = max_records
        self.archive_days = archive_days
        self.archive_check_hours = archive_check_hours
        self.auto_create_tables = auto_create_tables
        self.lock = Lock()
        self.connection = None
        self.last_archive_check = None
        self.log_queue = Queue()
        self.running = True

        # Create the tables up front so the background threads can insert.
        if self.auto_create_tables:
            self._ensure_tables_exist()

        # Background writer that drains the queue in batches.
        self.process_thread = Thread(target=self._process_queue, daemon=True)
        self.process_thread.start()

        # Background thread that periodically archives old rows.
        self.archive_thread = Thread(target=self._archive_check_loop, daemon=True)
        self.archive_thread.start()

    def _ensure_tables_exist(self):
        """
        Create the main log table and the archive table if they are missing.

        :raises Exception: re-raised after rollback if table creation fails.
        """
        conn = None
        cursor = None
        try:
            conn = self._get_connection()
            cursor = conn.cursor()

            # NOTE: the `type` column is written by _insert_batch and
            # _archive_old_logs; it was previously missing from this DDL,
            # which made every insert fail on freshly created tables.
            cursor.execute(f"""
                CREATE TABLE IF NOT EXISTS {self.table_name} (
                    `id` bigint(20) NOT NULL AUTO_INCREMENT,
                    `timestamp` datetime NOT NULL,
                    `level` varchar(10) NOT NULL,
                    `module` varchar(100) DEFAULT NULL,
                    `message` text NOT NULL,
                    `exception` text DEFAULT NULL,
                    `thread` varchar(50) DEFAULT NULL,
                    `process` varchar(50) DEFAULT NULL,
                    `type` varchar(50) DEFAULT NULL,
                    PRIMARY KEY (`id`),
                    KEY `idx_timestamp` (`timestamp`),
                    KEY `idx_level` (`level`)
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
            """)

            cursor.execute(f"""
                CREATE TABLE IF NOT EXISTS {self.archive_table} (
                    `id` bigint(20) NOT NULL AUTO_INCREMENT,
                    `timestamp` datetime NOT NULL,
                    `level` varchar(10) NOT NULL,
                    `module` varchar(100) DEFAULT NULL,
                    `message` text NOT NULL,
                    `exception` text DEFAULT NULL,
                    `thread` varchar(50) DEFAULT NULL,
                    `process` varchar(50) DEFAULT NULL,
                    `type` varchar(50) DEFAULT NULL,
                    `archived_at` datetime NOT NULL,
                    PRIMARY KEY (`id`),
                    KEY `idx_timestamp` (`timestamp`),
                    KEY `idx_level` (`level`)
                ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
            """)

            conn.commit()
            print(f"Ensured tables {self.table_name} and {self.archive_table} exist")

        except Exception as e:
            print(f"Error ensuring tables exist: {e}")
            if conn:
                conn.rollback()
            raise
        finally:
            if cursor:
                cursor.close()
            if conn:
                conn.close()
            self.connection = None

    def _get_connection(self):
        """
        Open a new database connection (connections are never shared).

        Retries transient ``OperationalError``s with exponential backoff.

        :return: an open pymysql connection
        :raises pymysql.OperationalError: if all retries fail.
        """
        max_retries = 3
        retry_delay = 1

        for attempt in range(max_retries):
            try:
                return pymysql.connect(**self.db_config)
            except pymysql.OperationalError:
                if attempt == max_retries - 1:
                    raise
                time.sleep(retry_delay)
                retry_delay *= 2

    def _close_connection(self):
        """Close the (legacy) shared connection attribute if it is open."""
        if self.connection and self.connection.open:
            self.connection.close()
        self.connection = None

    def emit(self, record):
        """
        Enqueue a log record for the background writer.

        Never blocks on the database, so logging stays fast on the caller's
        thread.
        """
        try:
            self.log_queue.put(record)
        except Exception:
            self.handleError(record)

    def _process_queue(self):
        """
        Background loop: drain the queue and write records in batches.

        Keeps running until ``close()`` clears ``self.running`` AND the queue
        is empty, so queued records are not lost on shutdown.
        """
        batch = []
        batch_size = 100  # rows per bulk INSERT

        while self.running or not self.log_queue.empty():
            try:
                # Bounded wait so the shutdown flag is re-checked regularly.
                record = self.log_queue.get(timeout=1)
                batch.append(self._format_record(record))

                if len(batch) >= batch_size:
                    self._insert_batch(batch)
                    batch = []

            except Empty:
                # Queue idle: flush whatever has accumulated.
                if batch:
                    self._insert_batch(batch)
                    batch = []
                continue
            except Exception as e:
                print(f"Error processing log queue: {e}")
                continue

        # Flush any remainder on shutdown.
        if batch:
            self._insert_batch(batch)

    def _format_record(self, record):
        """
        Convert a ``logging.LogRecord`` into a column dict for insertion.

        Fields are truncated to their actual column sizes (previously
        everything was truncated to 65535, which overflowed the varchar
        columns and made inserts fail).
        """
        def safe_truncate(value, max_length):
            # Truncate to the column limit; falsy values become NULL.
            return str(value)[:max_length] if value else None

        # self.format() must run before exc_text is read: the formatter
        # populates record.exc_text from exc_info as a side effect.
        message = self.format(record)
        return {
            'timestamp': datetime.fromtimestamp(record.created),
            'level': safe_truncate(record.levelname, self._LEVEL_MAX),
            'module': safe_truncate(record.module, self._MODULE_MAX),
            'message': safe_truncate(message, self._TEXT_MAX),
            'exception': safe_truncate(record.exc_text, self._TEXT_MAX) if record.exc_info else None,
            'thread': safe_truncate(record.threadName, self._THREAD_MAX),
            'process': safe_truncate(getattr(record, 'processName', None), self._PROCESS_MAX),
            'type': safe_truncate(TYPETYPE, self._TYPE_MAX)
        }

    def _insert_batch(self, records):
        """
        Bulk-insert formatted records; on DB error, dump them to a local file.

        :param records: list of dicts produced by ``_format_record``.
        """
        if not records:
            return

        conn = None
        cursor = None
        try:
            # A fresh connection per batch avoids stale-connection issues.
            conn = self._get_connection()
            cursor = conn.cursor()

            sql = f"""
            INSERT INTO {self.table_name} 
            (timestamp, level, module, message, exception, thread, process,type)
            VALUES (%(timestamp)s, %(level)s, %(module)s, %(message)s, %(exception)s, %(thread)s, %(process)s, %(type)s)
            """
            cursor.executemany(sql, records)
            conn.commit()

        except pymysql.Error as e:
            error_msg = f"Database error ({e.args[0]}): {e.args[1]}"
            print(f"Error inserting logs: {error_msg}")

            # Best effort: persist the failed batch so it is not lost.
            self._save_failed_records(records, error_msg)

            if conn:
                conn.rollback()
        finally:
            if cursor:
                cursor.close()
            if conn:
                conn.close()

    def _save_failed_records(self, records, error_msg):
        """Write records that failed to insert to a timestamped local file."""
        try:
            filename = f"failed_logs_{datetime.now().strftime('%Y%m%d_%H%M%S')}.log"
            with open(filename, 'a') as f:
                f.write(f"=== Database Insert Failed ({datetime.now()}) ===\n")
                f.write(f"Error: {error_msg}\n")
                f.write(f"Records Count: {len(records)}\n\n")
                for i, record in enumerate(records, 1):
                    f.write(f"Record {i}:\n")
                    f.write(f"  Timestamp: {record['timestamp']}\n")
                    f.write(f"  Level: {record['level']}\n")
                    f.write(f"  Module: {record['module']}\n")
                    f.write(f"  Message: {record['message'][:500]}...\n")  # truncate long messages
                    if record['exception']:
                        f.write(f"  Exception: {record['exception'][:500]}...\n")
                    f.write(f"  type: {record['type']}\n")
                    f.write("\n")
                f.write("="*50 + "\n\n")
            # Previously printed a literal "(unknown)" instead of the path.
            print(f"Saved {len(records)} failed records to {filename}")
        except Exception as e:
            print(f"Error saving failed records: {e}")

    def _check_archive(self):
        """
        Archive old rows when the main table has grown past ``max_records``.

        Only invoked from the periodic background loop (not after inserts).
        """
        now = datetime.now()
        # Throttle: skip if the last check happened within the interval.
        if self.last_archive_check and (now - self.last_archive_check).total_seconds() < self.archive_check_hours * 3600:
            return

        self.last_archive_check = now

        conn = None
        cursor = None
        try:
            conn = self._get_connection()
            cursor = conn.cursor()

            # DictCursor returns dict rows, so alias the count; the previous
            # fetchone()[0] raised KeyError on every check.
            cursor.execute(f"SELECT COUNT(*) AS cnt FROM {self.table_name}")
            count = cursor.fetchone()['cnt']

            if count > self.max_records:
                self._archive_old_logs(conn, cursor)

        except Exception as e:
            print(f"Error checking archive: {e}")
        finally:
            if cursor:
                cursor.close()
            if conn:
                # Previously leaked: only the cursor was closed here.
                conn.close()

    def _archive_old_logs(self, conn, cursor):
        """
        Copy rows older than ``archive_days`` into the archive table, then
        delete them from the main table, inside a single transaction.
        """
        try:
            conn.begin()

            archive_date = datetime.now() - timedelta(days=self.archive_days)

            # 1. Copy old rows into the archive table.
            cursor.execute(f"""
                INSERT INTO {self.archive_table} 
                (timestamp, level, module, message, exception, thread, process, archived_at,type)
                SELECT timestamp, level, module, message, exception, thread, process, NOW(),type 
                FROM {self.table_name} 
                WHERE timestamp < %s
            """, (archive_date,))

            # 2. Delete the rows that were just archived.
            cursor.execute(f"""
                DELETE FROM {self.table_name} 
                WHERE timestamp < %s
            """, (archive_date,))

            conn.commit()

            print(f"Archived logs older than {archive_date}")

        except Exception as e:
            conn.rollback()
            print(f"Error archiving logs: {e}")

    def _archive_check_loop(self):
        """Background loop that triggers the archive check periodically."""
        while self.running:
            time.sleep(self.archive_check_hours * 3600)
            try:
                self._check_archive()
            except Exception as e:
                print(f"Error in archive check loop: {e}")

    def close(self):
        """Stop the background threads, flush pending records and close."""
        self.running = False
        if self.process_thread.is_alive():
            self.process_thread.join(timeout=5)
        if self.archive_thread.is_alive():
            self.archive_thread.join(timeout=5)
        self._close_connection()
        super().close()


class SizeRotatingFileHandler(logging.handlers.RotatingFileHandler):
    """
    RotatingFileHandler that splits by size and gzip-compresses old backups.
    """

    def __init__(self, filename, maxBytes=10*1024*1024, backupCount=5, compress=True, encoding=None, delay=False):
        """
        Initialize the handler.

        :param filename: log file path; parent directories are created
        :param maxBytes: maximum size of a single file in bytes
        :param backupCount: number of backup files to keep
        :param compress: gzip backups older than the most recent one
        :param encoding: file encoding passed to the base handler
        :param delay: defer opening the file until the first emit
        """
        # Ensure the target directory exists.  dirname() is '' when the file
        # lives in the current directory, and makedirs('') raises — guard it.
        parent = os.path.dirname(filename)
        if parent:
            os.makedirs(parent, exist_ok=True)

        # Initialize the base class (explicit append mode).
        super().__init__(
            filename,
            mode='a',
            maxBytes=maxBytes,
            backupCount=backupCount,
            encoding=encoding,
            delay=delay
        )
        self.compress = compress

    def doRollover(self):
        """
        Rotate as the base class does, then gzip all backups except ``.1``.

        The newest backup stays uncompressed so the base class can rename it
        on the next rollover.
        """
        super().doRollover()

        if self.compress and self.backupCount > 0:
            for index in range(2, self.backupCount + 1):
                backup = f"{self.baseFilename}.{index}"
                if os.path.exists(backup):
                    with open(backup, 'rb') as raw:
                        with gzip.open(f"{backup}.gz", 'wb') as packed:
                            packed.writelines(raw)
                    os.remove(backup)

class ErrorLogArchiver:
    """
    Error log archiver: periodically exports old error rows from the
    ``app_logs`` table into gzip-compressed daily files and deletes them
    from the database.
    """

    def __init__(self, host, user, password, database, port=3306,
                 error_level=logging.ERROR, archive_dir='error_logs', 
                 archive_days=7, check_hours=24):
        """
        Initialize the error log archiver.

        :param host: MySQL host address
        :param user: user name
        :param password: password
        :param database: database name
        :param port: port number
        :param error_level: log level whose rows are archived
        :param archive_dir: directory for the archive files
        :param archive_days: archive errors older than this many days
        :param check_hours: interval (hours) between archive runs
        """
        self.db_config = {
            'host': host,
            'user': user,
            'password': password,
            'db': database,
            'port': port,
            'charset': 'utf8mb4',
            'cursorclass': pymysql.cursors.DictCursor
        }
        self.error_level = error_level
        self.archive_dir = archive_dir
        self.archive_days = archive_days
        self.check_hours = check_hours
        self.running = True

        # Create the archive directory up front.
        os.makedirs(self.archive_dir, exist_ok=True)

        # Start the background archiving thread.
        self.thread = Thread(target=self._archive_loop, daemon=True)
        self.thread.start()

    def _archive_loop(self):
        """Run one archive pass immediately, then once per interval."""
        while self.running:
            self._archive_errors()
            time.sleep(self.check_hours * 3600)

    def _archive_errors(self):
        """
        Export rows of ``error_level`` older than ``archive_days`` to
        per-day gzip files, then delete them from the database.
        """
        conn = None
        cursor = None
        try:
            conn = pymysql.connect(**self.db_config)
            cursor = conn.cursor()

            archive_date = datetime.now() - timedelta(days=self.archive_days)

            # Fetch the rows that are due for archiving.
            cursor.execute("""
                SELECT * FROM app_logs 
                WHERE level = %s AND timestamp < %s
                ORDER BY timestamp
            """, (logging.getLevelName(self.error_level), archive_date))

            errors = cursor.fetchall()

            if not errors:
                return

            # Group errors by calendar day.
            error_groups = {}
            for error in errors:
                date_str = error['timestamp'].strftime('%Y-%m-%d')
                if date_str not in error_groups:
                    error_groups[date_str] = []
                error_groups[date_str].append(error)

            # Write one archive file per day, then compress it.
            for date_str, daily_errors in error_groups.items():
                filename = os.path.join(self.archive_dir, f"errors_{date_str}.log")

                with open(filename, 'a', encoding='utf-8') as f:
                    for error in daily_errors:
                        # .get(): the `type` column may be absent on tables
                        # created before it was added to the schema.
                        f.write(f"{error.get('type', '')}{error['timestamp']} [{error['level']}] {error['module']}: {error['message']}\n")
                        if error['exception']:
                            f.write(f"Exception: {error['exception']}\n")
                        f.write("\n")

                # Compress the day's file; the target was previously the
                # literal "(unknown).gz", so every day clobbered one file.
                with open(filename, 'rb') as f_in:
                    with gzip.open(f"{filename}.gz", 'wb') as f_out:
                        f_out.writelines(f_in)
                os.remove(filename)

            # Remove the archived rows from the database.
            cursor.execute("""
                DELETE FROM app_logs 
                WHERE level = %s AND timestamp < %s
            """, (logging.getLevelName(self.error_level), archive_date))
            conn.commit()

            print(f"Archived {len(errors)} error logs")

        except Exception as e:
            print(f"Error archiving error logs: {e}")
            if conn:
                conn.rollback()
        finally:
            if cursor:
                cursor.close()
            if conn:
                conn.close()

    def close(self):
        """Signal the background thread to stop and wait briefly for it."""
        self.running = False
        if self.thread.is_alive():
            self.thread.join(timeout=5)
def ensure_dir_exists(dir_path):
    """Create the directory (and any parents) if missing; return it as a Path."""
    path = Path(dir_path) if isinstance(dir_path, str) else dir_path
    path.mkdir(parents=True, exist_ok=True)
    return path

def configure_logging(mysql_host, mysql_user, mysql_password, mysql_database, mysql_port=3306):
    """
    Set up the logging system: console, rotating file, MySQL handler and
    an error-log archiver.

    :param mysql_host: MySQL host address
    :param mysql_user: user name
    :param mysql_password: password
    :param mysql_database: database name
    :param mysql_port: port number, default 3306
    :return: (logger, error_archiver) tuple
    """
    fmt = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')

    # Root logger captures everything; handlers filter by their own levels.
    logger = logging.getLogger()
    logger.setLevel(logging.DEBUG)

    # Console handler: INFO and above.
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(fmt)
    logger.addHandler(console_handler)

    # Size-rotating file handler: DEBUG and above.
    log_path = 'error_logs/app.log'
    file_handler = SizeRotatingFileHandler(log_path, maxBytes=1*1024*1024, backupCount=5)
    file_handler.setLevel(logging.DEBUG)
    file_handler.setFormatter(fmt)
    logger.addHandler(file_handler)

    # MySQL handler: INFO and above, bare message only.
    mysql_handler = MySQLHandler(
        host=mysql_host,
        user=mysql_user,
        password=mysql_password,
        database=mysql_database,
        port=mysql_port,
        max_records=50000,
        archive_days=7
    )
    mysql_handler.setLevel(logging.INFO)
    mysql_handler.setFormatter(logging.Formatter('%(message)s'))
    logger.addHandler(mysql_handler)

    # Background archiver for ERROR-level rows.
    error_archiver = ErrorLogArchiver(
        host=mysql_host,
        user=mysql_user,
        password=mysql_password,
        database=mysql_database,
        port=mysql_port,
        error_level=logging.ERROR
    )

    return logger, error_archiver


# Usage example
if __name__ == "__main__":
    import random
    print('配置日志开始')
    # Wire up console, file and MySQL logging plus the error archiver.
    logger, error_archiver = configure_logging(
        mysql_host='1.194.161.240',
        mysql_user='xingfan_module',
        mysql_password='aE0sqVnRybqEB9R6',
        mysql_database='xingfan_module',
        mysql_port=23456
    )

    def worker(logger, worker_id):
        """Emit 100 randomly-leveled records, occasionally with a traceback."""
        levels = (
            logging.DEBUG, logging.INFO,
            logging.WARNING, logging.ERROR,
            logging.CRITICAL,
        )
        for i in range(100):
            logger.log(random.choice(levels), f"Worker {worker_id} message {i}")

            # Roughly one in ten iterations logs a real exception.
            if random.random() < 0.1:
                try:
                    1 / 0
                except Exception:
                    logger.error(f"Worker {worker_id} encountered error", exc_info=True)

            time.sleep(random.uniform(0.01, 0.1))

    # Fan out five worker threads and wait for them to finish.
    workers = [Thread(target=worker, args=(logger, n)) for n in range(5)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()

    # Shut everything down cleanly.
    for handler in logger.handlers:
        handler.close()
    error_archiver.close()
