package com.sparkflow.mysqlbinlog.clickhousesync.listener.binlog;

import com.github.shyiko.mysql.binlog.BinaryLogClient;
import com.github.shyiko.mysql.binlog.event.*;
import com.sparkflow.mysqlbinlog.clickhousesync.core.ddl.ClickHouseDDLProcessor;
import com.sparkflow.mysqlbinlog.clickhousesync.service.metadata.TableMetadataService;
import com.sparkflow.mysqlbinlog.clickhousesync.core.sql.ClickHouseSqlBuilder;
import com.sparkflow.mysqlbinlog.clickhousesync.service.position.BinlogPositionService;
import com.sparkflow.mysqlbinlog.clickhousesync.config.properties.MysqlBinlogProperties;
import com.sparkflow.mysqlbinlog.clickhousesync.utils.TimeUtil;
import lombok.extern.slf4j.Slf4j;
import net.sf.jsqlparser.parser.CCJSqlParserUtil;
import net.sf.jsqlparser.statement.Statement;
import net.sf.jsqlparser.statement.alter.Alter;
import net.sf.jsqlparser.statement.create.table.CreateTable;
import net.sf.jsqlparser.statement.drop.Drop;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.jdbc.core.BatchPreparedStatementSetter;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.io.IOException;
import java.io.Serializable;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * MySQL Binlog事件处理器
 * 负责监听MySQL Binlog事件并同步到ClickHouse数据库
 *
 * @author Mr_xiaoZH
 * @date 2025-07-23
 */
@Slf4j
@Component
public class BinlogEventHandler {

    @Autowired
    private BinaryLogClient binaryLogClient;
    @Autowired
    @Qualifier("clickHouseJdbcTemplate")
    private JdbcTemplate clickHouseJdbcTemplate;
    @Autowired
    private MysqlBinlogProperties mysqlBinlogProperties;
    @Autowired
    private TableMetadataService tableMetadataService;

    // Name of the binlog file currently being read; updated on ROTATE events.
    private String currentBinlogFilename;
    // Table ID mapping: numeric table ID (per TABLE_MAP event) -> "schema.table".
    private final Map<Long, String> tableIdMap = new ConcurrentHashMap<>();
    // Single-threaded executor running the blocking binlog client connection.
    private ExecutorService executor;

    /**
     * Initializes table metadata, restores the last persisted binlog position,
     * registers the event listener and connects the binlog client.
     * Runs automatically once the Spring container has finished wiring.
     *
     * @throws RuntimeException if any step of the startup sequence fails
     */
    @PostConstruct
    public void startListening() {
        try {
            // Load table metadata (columns, types, primary keys) up front.
            this.tableMetadataService.initialize();

            // Restore the persisted position for resume-from-checkpoint.
            loadBinlogPosition();

            // Register the event listener before connecting.
            registerEventListener();

            // Connect the binlog client on its own thread.
            startBinlogClient();

            log.info("MySQL Binlog Event Handler started successfully");
        } catch (Exception e) {
            log.error("Failed to start MySQL Binlog Event Handler", e);
            throw new RuntimeException("Binlog handler startup failed", e);
        }
    }

    /**
     * Loads the persisted binlog position, if any, so replication resumes
     * where the previous run left off instead of from the current position.
     */
    private void loadBinlogPosition() {
        BinlogPositionService.BinlogPosition position = BinlogPositionService.load();
        if (position != null) {
            binaryLogClient.setBinlogFilename(position.getFilename());
            binaryLogClient.setBinlogPosition(position.getPosition());
            log.info("Loaded binlog position: {} at {}", position.getFilename(), position.getPosition());
        } else {
            log.info("No previous binlog position found, starting from current position");
        }
    }

    /**
     * Registers the binlog event listener. Exceptions are caught per event so
     * a single bad event does not terminate the replication stream.
     */
    private void registerEventListener() {
        binaryLogClient.registerEventListener(event -> {
            try {
                processEvent(event);
            } catch (Exception e) {
                log.error("Error processing binlog event: {}", event.getHeader().getEventType(), e);
            }
        });
    }

    /**
     * Dispatches a binlog event to the matching handler and persists the
     * binlog position after any event that resulted in a successful sync.
     *
     * @param event binlog event delivered by the client
     */
    private void processEvent(Event event) {
        boolean shouldSavePosition = false;
        EventData data = event.getData();

        if (data instanceof RotateEventData) {
            shouldSavePosition = handleRotateEvent((RotateEventData) data);
        } else if (data instanceof TableMapEventData) {
            handleTableMapEvent((TableMapEventData) data);
        } else if (data instanceof QueryEventData) {
            shouldSavePosition = handleQueryEvent((QueryEventData) data);
        } else if (data instanceof WriteRowsEventData) {
            shouldSavePosition = handleWriteRowsEvent((WriteRowsEventData) data);
        } else if (data instanceof UpdateRowsEventData) {
            shouldSavePosition = handleUpdateRowsEvent((UpdateRowsEventData) data);
        } else if (data instanceof DeleteRowsEventData) {
            shouldSavePosition = handleDeleteRowsEvent((DeleteRowsEventData) data);
        }

        if (shouldSavePosition) {
            saveBinlogPosition();
        }
    }

    /**
     * Handles a binlog file rotation (MySQL switched to a new binlog file).
     *
     * @param rotateData rotation event data
     * @return false — rotation alone does not require persisting a position
     */
    private boolean handleRotateEvent(RotateEventData rotateData) {
        currentBinlogFilename = rotateData.getBinlogFilename();
        // Table IDs from the previous file may be reassigned; every row event
        // in the new file is preceded by a fresh TABLE_MAP event, so dropping
        // the old mappings here is safe and avoids stale entries.
        tableIdMap.clear();
        log.info("Binlog file rotated to: {}", rotateData.getBinlogFilename());
        return false;
    }

    /**
     * Records the table-ID to table-name mapping for tables configured to sync.
     *
     * @param tableData table map event data
     */
    private void handleTableMapEvent(TableMapEventData tableData) {
        long tableId = tableData.getTableId();
        String fullTableName = String.format("%s.%s", tableData.getDatabase(), tableData.getTable());

        if (isTableSynced(fullTableName)) {
            log.debug("Table mapped: {} (ID: {})", fullTableName, tableId);
            tableIdMap.put(tableId, fullTableName);
        }
    }

    /**
     * Handles a QUERY event, forwarding DDL statements to ClickHouse.
     *
     * @param queryData query event data
     * @return true if a DDL statement was successfully applied to ClickHouse
     */
    private boolean handleQueryEvent(QueryEventData queryData) {
        String sql = queryData.getSql();
        if (sql == null || !ClickHouseDDLProcessor.isDDL(sql)) {
            return false;
        }

        log.info("DDL detected: {}", sql);
        return processDDL(sql, queryData.getDatabase());
    }

    /**
     * Parses a MySQL DDL statement, converts it to ClickHouse-compatible DDL
     * and executes it against ClickHouse.
     *
     * @param sql           the MySQL DDL statement
     * @param currentSchema schema the statement was issued in, used to qualify
     *                      unqualified table names
     * @return true if the converted DDL was executed successfully
     */
    private boolean processDDL(String sql, String currentSchema) {
        try {
            // extractTableName may return null for DDL we cannot parse; check
            // BEFORE dereferencing. (Previously the null check came after the
            // first dereference and could never take effect.)
            String extracted = ClickHouseDDLProcessor.extractTableName(sql);
            if (extracted == null) {
                return false;
            }

            String tableName = extracted.replace("`", "");
            if (!tableName.contains(".")) {
                tableName = String.format("%s.%s", currentSchema, tableName);
            }
            if (!isTableSynced(tableName)) {
                return false;
            }

            Statement stmt = CCJSqlParserUtil.parse(sql);
            String clickhouseDDL;
            if (stmt instanceof CreateTable) {
                clickhouseDDL = ClickHouseDDLProcessor.handleCreateTable((CreateTable) stmt);
            } else if (stmt instanceof Drop) {
                clickhouseDDL = ClickHouseDDLProcessor.handleDropTable((Drop) stmt, tableName);
            } else if (stmt instanceof Alter) {
                clickhouseDDL = ClickHouseDDLProcessor.handleAlterTable((Alter) stmt, tableName);
            } else {
                log.warn("Unsupported DDL type: {}", sql);
                return false;
            }

            if (StringUtils.isNotBlank(clickhouseDDL)) {
                log.info("Syncing DDL to ClickHouse: {}", clickhouseDDL);
                clickHouseJdbcTemplate.execute(clickhouseDDL);
                return true;
            }
        } catch (Exception e) {
            log.error("Failed to sync DDL to ClickHouse: {}", sql, e);
        }
        return false;
    }

    /**
     * Handles a row-insert event.
     *
     * @param eventData insert event data
     * @return true if the event belonged to a synced table and was processed
     */
    private boolean handleWriteRowsEvent(WriteRowsEventData eventData) {
        String tableName = resolveSyncedTable(eventData.getTableId());
        if (tableName == null) {
            return false;
        }
        handleWriteRows(tableName, eventData);
        return true;
    }

    /**
     * Handles a row-update event.
     *
     * @param eventData update event data
     * @return true if the event belonged to a synced table and was processed
     */
    private boolean handleUpdateRowsEvent(UpdateRowsEventData eventData) {
        String tableName = resolveSyncedTable(eventData.getTableId());
        if (tableName == null) {
            return false;
        }
        handleUpdateRows(tableName, eventData);
        return true;
    }

    /**
     * Handles a row-delete event.
     *
     * @param eventData delete event data
     * @return true if the event belonged to a synced table and was processed
     */
    private boolean handleDeleteRowsEvent(DeleteRowsEventData eventData) {
        String tableName = resolveSyncedTable(eventData.getTableId());
        if (tableName == null) {
            return false;
        }
        handleDeleteRows(tableName, eventData);
        return true;
    }

    /**
     * Resolves a numeric table ID to a fully-qualified table name and verifies
     * that the table is configured for sync and that its metadata is loaded.
     * Centralizes the guard previously applied only to insert events, so
     * update/delete events cannot reach a table with missing metadata.
     *
     * @param tableId numeric table ID from the row event
     * @return "schema.table" when the event should be processed, null otherwise
     */
    private String resolveSyncedTable(long tableId) {
        String tableName = tableIdMap.get(tableId);
        if (tableName == null || !isTableSynced(tableName)) {
            return null;
        }
        if (!TableMetadataService.tableMetadataCache.containsTable(tableName)) {
            log.warn("Table metadata not found for table: {}, skipping event", tableName);
            return null;
        }
        return tableName;
    }

    /**
     * Checks whether a table is configured for synchronization.
     *
     * @param tableName fully-qualified table name ("schema.table")
     * @return true if the table should be synced
     */
    private boolean isTableSynced(String tableName) {
        return mysqlBinlogProperties.getSyncTables().contains(tableName);
    }

    /**
     * Persists the current binlog position for resume-from-checkpoint.
     * Falls back to the client's filename when no ROTATE event has been seen
     * yet (previously a null filename could be persisted in that window).
     */
    private void saveBinlogPosition() {
        try {
            String filename = currentBinlogFilename != null
                    ? currentBinlogFilename
                    : binaryLogClient.getBinlogFilename();
            BinlogPositionService.BinlogPosition currentPos = new BinlogPositionService.BinlogPosition();
            currentPos.setFilename(filename);
            currentPos.setPosition(binaryLogClient.getBinlogPosition());
            BinlogPositionService.savePosition(currentPos);
        } catch (Exception e) {
            log.error("Failed to save binlog position", e);
        }
    }

    /**
     * Connects the binlog client on a dedicated daemon thread, because
     * {@code BinaryLogClient#connect()} blocks for the life of the stream.
     */
    private void startBinlogClient() {
        executor = Executors.newSingleThreadExecutor(r -> {
            Thread thread = new Thread(r, "binlog-client-thread");
            thread.setDaemon(true);
            return thread;
        });

        executor.execute(() -> {
            try {
                binaryLogClient.connect();
            } catch (IOException e) {
                // Do not rethrow: with submit() the exception would only be
                // captured in a never-inspected Future and silently lost.
                log.error("Error connecting to MySQL Binlog", e);
            }
        });
    }

    /**
     * Releases resources on application shutdown: disconnects the binlog
     * client and stops the connection executor.
     */
    @PreDestroy
    public void shutdown() {
        try {
            if (binaryLogClient != null) {
                binaryLogClient.disconnect();
            }
            if (executor != null && !executor.isShutdown()) {
                executor.shutdown();
            }
            log.info("BinlogEventHandler shutdown completed");
        } catch (Exception e) {
            log.error("Error during shutdown", e);
        }
    }

    /**
     * Syncs inserted rows to ClickHouse as a single batched INSERT.
     *
     * @param tableName fully-qualified table name, already validated
     * @param eventData insert event data
     */
    private void handleWriteRows(String tableName, WriteRowsEventData eventData) {
        log.info("Write Rows Event for table: {}", tableName);
        List<String> columns = TableMetadataService.tableMetadataCache.getTableColumns(tableName);
        Map<String, String> columnTypeMap = TableMetadataService.tableMetadataCache.getColumnTypeMap(tableName);
        String sql = ClickHouseSqlBuilder.generateInsertSql(tableName, columns);

        pushToClickHouse(sql, eventData.getRows(), columns, columnTypeMap);
    }

    /**
     * Syncs updated rows to ClickHouse by inserting the after-image of each
     * row (relying on a ReplacingMergeTree-style engine for deduplication).
     * All rows go out in one batch, followed by a single OPTIMIZE — the
     * previous implementation issued one INSERT and one OPTIMIZE per row.
     *
     * @param tableName fully-qualified table name, already validated
     * @param eventData update event data
     */
    private void handleUpdateRows(String tableName, UpdateRowsEventData eventData) {
        log.info("Update Rows Event for table: {}", tableName);
        List<String> columns = TableMetadataService.tableMetadataCache.getTableColumns(tableName);
        Map<String, String> columnTypeMap = TableMetadataService.tableMetadataCache.getColumnTypeMap(tableName);
        String sql = ClickHouseSqlBuilder.generateInsertSql(tableName, columns);

        // Collect the after-images (entry value = row state after the update).
        List<Serializable[]> updatedRows = new ArrayList<>(eventData.getRows().size());
        for (Map.Entry<Serializable[], Serializable[]> row : eventData.getRows()) {
            updatedRows.add(row.getValue());
        }
        if (updatedRows.isEmpty()) {
            return;
        }

        pushToClickHouse(sql, updatedRows, columns, columnTypeMap);
        clickHouseJdbcTemplate.update(ClickHouseSqlBuilder.generateOptimizeSql(tableName));
    }

    /**
     * Syncs deleted rows to ClickHouse by deleting on the primary key value.
     * OPTIMIZE runs once after the loop (not per row) and only when at least
     * one delete succeeded.
     *
     * @param tableName fully-qualified table name, already validated
     * @param eventData delete event data
     */
    private void handleDeleteRows(String tableName, DeleteRowsEventData eventData) {
        log.info("Delete Rows Event for table: {}", tableName);
        String primaryKey = TableMetadataService.tableMetadataCache.getPrimaryKey(tableName);
        int primaryKeyIndex = getPrimaryKeyIndex(tableName, primaryKey);
        String sql = ClickHouseSqlBuilder.generateDeleteSql(tableName, primaryKey);

        boolean anyDeleted = false;
        for (Serializable[] row : eventData.getRows()) {
            final Serializable primaryKeyValue = row[primaryKeyIndex];
            try {
                clickHouseJdbcTemplate.update(sql, primaryKeyValue);
                anyDeleted = true;
                log.info("Successfully executed DELETE: {} with key {}", sql, primaryKeyValue);
            } catch (Exception e) {
                log.error("Failed to execute DELETE: {} with key {}", sql, primaryKeyValue, e);
            }
        }
        if (anyDeleted) {
            clickHouseJdbcTemplate.update(ClickHouseSqlBuilder.generateOptimizeSql(tableName));
        }
    }

    /**
     * Executes a batched INSERT against ClickHouse.
     *
     * @param sql           parameterized INSERT statement
     * @param rows          row values, one array per row, aligned with columns
     * @param columns       ordered column names
     * @param columnTypeMap column name -> MySQL column type
     */
    private void pushToClickHouse(String sql, List<Serializable[]> rows, List<String> columns, Map<String, String> columnTypeMap) {
        try {
            clickHouseJdbcTemplate.batchUpdate(sql, new BatchPreparedStatementSetter() {
                @Override
                public void setValues(java.sql.PreparedStatement ps, int i) throws java.sql.SQLException {
                    Object[] row = rows.get(i);
                    for (int j = 0; j < row.length; j++) {
                        String columnName = columns.get(j);
                        Object value = row[j];
                        // DATETIME columns arrive as epoch-millis longs from the
                        // binlog; convert to 'YYYY-MM-DD HH:mm:ss' strings.
                        if ("DATETIME".equalsIgnoreCase(columnTypeMap.get(columnName)) && value instanceof Long) {
                            value = TimeUtil.toDateTimeStringMs_0((Long) value);
                        }
                        ps.setObject(j + 1, value);
                    }
                }

                @Override
                public int getBatchSize() {
                    return rows.size();
                }
            });
            log.info("Successfully executed SQL: {}", sql);
        } catch (Exception e) {
            log.error("Failed to execute SQL: {}", sql, e);
        }
    }

    /**
     * Returns the index of the primary key within the table's column list.
     *
     * @param tableName  fully-qualified table name
     * @param primaryKey primary key column name
     * @return zero-based index of the primary key column
     * @throws IllegalArgumentException if the primary key is not among the columns
     */
    private int getPrimaryKeyIndex(String tableName, String primaryKey) {
        List<String> columns = TableMetadataService.tableMetadataCache.getTableColumns(tableName);
        int index = columns.indexOf(primaryKey);
        if (index < 0) {
            throw new IllegalArgumentException("Primary key not found in columns: " + primaryKey);
        }
        return index;
    }

}
