package com.Cychat.commons.appender;

import ch.qos.logback.classic.spi.ILoggingEvent;
import ch.qos.logback.core.UnsynchronizedAppenderBase;
import com.Cychat.commons.utils.SnowflakeIdWorker;
import jakarta.annotation.PostConstruct;
import jakarta.annotation.PreDestroy;
import lombok.Data;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.stereotype.Component;

import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
import java.util.concurrent.TimeUnit;

@Component
@Slf4j
@Data
public class BatchDbLogAppender extends UnsynchronizedAppenderBase<ILoggingEvent> {

    @Value("${server.id:1}")
    private long serverId;
    // Snowflake generator for unique log ids; built in init() once serverId is injected.
    private SnowflakeIdWorker snowflakeIdWorker;
    // JDBC template used for batch inserts; injected via constructor.
    private final JdbcTemplate jdbcTemplate;
    // Thread-safe timestamp formatter. SimpleDateFormat is NOT thread-safe, and
    // flushLogs() can run concurrently on the scheduled worker thread and on a
    // logging thread (via append), so a shared SimpleDateFormat would corrupt output.
    private static final DateTimeFormatter LOG_TIME_FORMAT =
            DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss").withZone(ZoneId.systemDefault());
    // Buffer of pending log events; all access guarded by {@code lock}.
    private final List<ILoggingEvent> logEvents = new ArrayList<>();
    // Guards logEvents only — DB writes deliberately happen outside this lock.
    private final Object lock = new Object();
    // Single-thread pool that drives the periodic flush loop.
    private ThreadPoolTaskExecutor executor;
    // Flush as soon as this many events are buffered.
    private int batchSize = 5;
    // Seconds between scheduled flushes of whatever is buffered.
    private int commitInterval = 3;
    // Lifecycle flag; volatile so logging threads see shutdown promptly.
    private volatile boolean isRunning = false;


    // Constructor injection of the JDBC template.
    public BatchDbLogAppender(JdbcTemplate jdbcTemplate) {
        this.jdbcTemplate = jdbcTemplate;
    }

    // -------------------------- Spring init: start the flush worker --------------------------
    /**
     * Prepares the id generator, starts the single-threaded flush loop, and only
     * then opens the gate ({@code isRunning}) so append() may buffer events.
     */
    @PostConstruct
    public void init() {
        if (isRunning) {
            return;
        }
        // Build the id generator BEFORE accepting events: flushLogs() dereferences
        // it, and events may arrive (and flush) as soon as isRunning flips to true.
        snowflakeIdWorker = new SnowflakeIdWorker(serverId);

        executor = new ThreadPoolTaskExecutor();
        executor.setCorePoolSize(1);
        executor.setMaxPoolSize(1);
        executor.setQueueCapacity(1000);
        executor.setThreadNamePrefix("log-batch-");
        executor.initialize();

        // Flip the flag on the calling thread, not inside the task. The original
        // set it inside the worker, which (a) dropped any events logged before the
        // worker got scheduled and (b) let a second init() call race past the guard.
        isRunning = true;
        executor.execute(() -> {
            while (isRunning) {
                try {
                    TimeUnit.SECONDS.sleep(commitInterval);
                    flushLogs();
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    break;
                }
            }
        });
        log.info("appender 已初始化");
    }

    // -------------------------- Spring destroy: stop the flush worker --------------------------
    /**
     * Stops accepting events, flushes the remaining buffer, then shuts the pool
     * down gracefully with a bounded wait (forcing termination on timeout/interrupt).
     */
    @PreDestroy
    public void destroy() {
        if (!isRunning || executor == null) {
            return;
        }
        isRunning = false;
        flushLogs(); // write out whatever is still buffered

        try {
            // 1. Ask the Spring wrapper to shut down.
            executor.shutdown();
            // 2. Reach the underlying ThreadPoolExecutor (Spring Boot 3.x) for
            //    awaitTermination/shutdownNow, which the wrapper does not expose.
            java.util.concurrent.ThreadPoolExecutor underlyingExecutor = executor.getThreadPoolExecutor();
            if (underlyingExecutor != null) {
                // 3. Bounded graceful wait.
                if (underlyingExecutor.awaitTermination(5, TimeUnit.SECONDS)) {
                    log.info("线程池已优雅停止");
                } else {
                    // 4. Timed out — force termination.
                    List<Runnable> unfinishedTasks = underlyingExecutor.shutdownNow();
                    log.warn("线程池被迫停止，有 {} 个任务未完成", unfinishedTasks.size());
                }
            }
        } catch (InterruptedException e) {
            // 5. Restore the interrupt flag, then force-stop the pool.
            Thread.currentThread().interrupt();
            log.error("停止ThreadPool时线程中断", e);
            if (executor != null) {
                java.util.concurrent.ThreadPoolExecutor underlyingExecutor = executor.getThreadPoolExecutor();
                if (underlyingExecutor != null) {
                    underlyingExecutor.shutdownNow();
                }
            }
        } finally {
            executor = null;
            log.info("appender 已销毁");
        }
    }

    // -------------------------- Event collection & batch commit --------------------------
    /**
     * Buffers one event; triggers a flush once {@code batchSize} events are queued.
     * The flush itself runs OUTSIDE the lock so a slow database write never blocks
     * every logging thread, and no SLF4J call is made here — logging from inside
     * append() would re-enter this very appender.
     */
    @Override
    protected void append(ILoggingEvent event) {
        if (!isRunning || event == null) {
            return;
        }
        boolean shouldFlush;
        synchronized (lock) {
            logEvents.add(event);
            shouldFlush = logEvents.size() >= batchSize;
        }
        if (shouldFlush) {
            flushLogs();
        }
    }

    /**
     * Atomically drains the buffer, then batch-inserts the drained events into
     * sys_log. Safe to call concurrently: the drain is under {@code lock}, the
     * insert works on a private snapshot.
     */
    private void flushLogs() {
        List<ILoggingEvent> eventsToProcess;
        synchronized (lock) {
            if (logEvents.isEmpty()) {
                return;
            }
            eventsToProcess = new ArrayList<>(logEvents);
            logEvents.clear();
        }

        try {
            String sql = "INSERT INTO sys_log (" +
                    "id, log_time, level, logger, message, thread, " +
                    "class_name, method_name, line_number, exception) " +
                    "VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)";

            jdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
                @Override
                public void setValues(PreparedStatement ps, int i) throws SQLException {
                    ILoggingEvent event = eventsToProcess.get(i);
                    ps.setLong(1, snowflakeIdWorker.nextId());
                    ps.setString(2, LOG_TIME_FORMAT.format(Instant.ofEpochMilli(event.getTimeStamp())));
                    ps.setString(3, event.getLevel().toString());
                    ps.setString(4, event.getLoggerName());
                    ps.setString(5, event.getFormattedMessage());
                    ps.setString(6, event.getThreadName());

                    // Caller data is only present when the logger is configured to
                    // capture it; fall back to empty markers otherwise.
                    if (event.getCallerData() != null && event.getCallerData().length > 0) {
                        ps.setString(7, event.getCallerData()[0].getClassName());
                        ps.setString(8, event.getCallerData()[0].getMethodName());
                        ps.setInt(9, event.getCallerData()[0].getLineNumber());
                    } else {
                        ps.setString(7, "");
                        ps.setString(8, "");
                        ps.setInt(9, 0);
                    }

                    String exception = null;
                    if (event.getThrowableProxy() != null) {
                        exception = event.getThrowableProxy().getClassName() + ": " + event.getThrowableProxy().getMessage();
                    }
                    ps.setString(10, exception);
                }

                @Override
                public int getBatchSize() {
                    return eventsToProcess.size();
                }
            });

            log.debug("成功写入 {} 条日志到数据库", eventsToProcess.size());
        } catch (Exception e) {
            // Report through logback's own status API. The original logged the
            // failure at DEBUG via SLF4J, which both buried the error and — on a
            // persistent DB outage — would feed new error events back into this
            // appender, growing the buffer in an endless loop.
            addError("写入 " + eventsToProcess.size() + " 条日志到数据库失败", e);
        }
    }
}