// com/tidu/strategy/SingleTableSyncStrategy.java
package com.tidu.strategy;

import com.tidu.strategy.abstractSync.AbstractSyncStrategy;
import com.tidu.strategy.task.ParallelWriteManager;
import com.tidu.utils.DbUtilsTemplate;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.StandardOpenOption;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

import javax.sql.DataSource;

import lombok.Getter;
import lombok.extern.slf4j.Slf4j;

@Slf4j
@Getter
public class SingleTableSyncStrategy extends AbstractSyncStrategy {

    public String tableName; // 表名
    private final int batchSize = 50000; // 增大批次大小
    private Long totalRecords = 0L; // 总记录数
    private Long offset = 0L; // 游标字段，记录上一次同步的最后一条数据的 id
    private final DbUtilsTemplate sourceDbUtils;
    private final DbUtilsTemplate targetDbUtils;
    private Long processedCount = 0L; // 已处理记录数

    // 并行写入管理器
    private final ParallelWriteManager parallelWriteManager;
    private final int parallelThreadCount = 8; // 增加线程数
    private final int parallelBatchSize = 10000; // 增加并行批次大小

    public SingleTableSyncStrategy (String tableName, DataSource sourceDataSource, DataSource targetDataSource) {
        super(sourceDataSource, targetDataSource);
        this.tableName = tableName;
        this.sourceDbUtils = new DbUtilsTemplate(sourceDataSource);
        this.targetDbUtils = new DbUtilsTemplate(targetDataSource);
        this.offset = loadOffset(); // 加载断点
        createTargetTableIfNotExists(); // 自动建表

        // 初始化并行写入管理器
        this.parallelWriteManager = new ParallelWriteManager(
                this.targetDbUtils,
                this.tableName,
                this.parallelThreadCount,
                this.parallelBatchSize
        );
    }

    /**
     * 获取数据（使用 DbUtilsTemplate）
     */
    @Override
    public List<Map<String, Object>> fetchData() {
       /* String subQuery;
        if (lastId == 0) {
            // 首次查询
            subQuery = "(SELECT id FROM " + tableName + " ORDER BY id LIMIT ?) AS t2";
        } else {
            // 后续查询
            subQuery = "(SELECT id FROM " + tableName + " WHERE id > ? ORDER BY id LIMIT ?) AS t2";
        }
        String sql = "SELECT t1.* FROM " + tableName + " AS t1 JOIN " + subQuery + " ON t1.id = t2.id ORDER BY t1.id";
        try {
            if (lastId == 0) {
                return sourceDbUtils.queryForMapList(sql, batchSize);
            } else {
                return sourceDbUtils.queryForMapList(sql, lastId, batchSize);
            }
        } catch (SQLException e) {
            throw new RuntimeException("Error fetching data from source", e);
        }*/
        String sql = "SELECT * FROM " + tableName + " LIMIT ? OFFSET ?";
        try {
            List<Map<String, Object>> list = sourceDbUtils.queryForMapList(sql, batchSize, offset);
            offset += batchSize;
            return list;
        } catch (SQLException e) {
            throw new RuntimeException("Error fetching data from source", e);
        }
    }

    /**
     * 写入数据（使用并行写入提高性能）
     */
    @Override
    public void writeData(List<Map<String, Object>> dataList) {
        if (dataList.isEmpty()) return;

        try {
            // 对于大数据量使用并行写入 (降低阈值以更频繁使用并行)
            if (dataList.size() > parallelBatchSize) {
                log.debug("Using parallel write for batch of {} records", dataList.size());
                long written = parallelWriteManager.writeDataInParallel(dataList);
                processedCount += written;
            } else {
                // 小数据量使用优化的单线程写入
                log.debug("Using optimized sequential write for batch of {} records", dataList.size());
                writeDataOptimized(dataList);
                processedCount += dataList.size();
            }

            /*// 更新 lastId 为最后一条记录的 id
            if (!dataList.isEmpty()) {
                this.lastId = (Long) dataList.get(dataList.size() - 1).get("id");
            }*/

            // 每处理5批保存一次offset，减少IO操作
            saveOffset();

        } catch (Exception e) {
            log.error("Error writing data batch of size {}: {}", dataList.size(), e.getMessage(), e);
            throw new RuntimeException("Error writing data to target", e);
        }
    }

    /**
     * 优化的单线程写入方法
     */
    private void writeDataOptimized(List<Map<String, Object>> dataList) throws SQLException {
        if (dataList.isEmpty()) return;

        Connection conn = null;
        try {
            conn = targetDbUtils.getConnection();
            conn.setAutoCommit(false);

            // 获取列信息并构建UPSERT SQL
            List<String> columns = new ArrayList<>(dataList.get(0).keySet());
            String upsertSql = buildUpsertSQL(columns);

            try (PreparedStatement ps = conn.prepareStatement(upsertSql)) {
                // 优化的批处理 - 每1000条执行一次
                for (int i = 0; i < dataList.size(); i++) {
                    Map<String, Object> map = dataList.get(i);
                    for (int j = 0; j < columns.size(); j++) {
                        Object value = map.get(columns.get(j));
                        ps.setObject(j + 1, value);
                    }
                    ps.addBatch();

                    // 每1000条记录执行一次批处理
                    if ((i + 1) % 1000 == 0) {
                        ps.executeBatch();
                        ps.clearBatch();
                    }
                }

                // 执行剩余的批处理
                ps.executeBatch();
                ps.clearBatch();
            }

            conn.commit();

        } catch (Exception e) {
            if (conn != null) {
                try {
                    conn.rollback();
                } catch (SQLException rollbackEx) {
                    log.error("Failed to rollback transaction", rollbackEx);
                }
            }
            throw e;
        } finally {
            if (conn != null) {
                try {
                    conn.close();
                } catch (SQLException e) {
                    log.warn("Failed to close connection", e);
                }
            }
        }
    }

    /**
     * 构建 UPSERT SQL 语句
     */
    private String buildUpsertSQL(List<String> columns) {
        StringBuilder sqlBuilder = new StringBuilder();
        sqlBuilder.append("INSERT INTO ").append(tableName).append(" (").append(String.join(",", columns)).append(") VALUES (")
                .append(columns.stream().map(k -> "?").collect(Collectors.joining(","))).append(") ON DUPLICATE KEY UPDATE ");

        // 构建更新部分，排除主键字段
        String updateClause = columns.stream()
                .filter(column -> !column.equals("id"))
                .map(column -> column + " = VALUES(" + column + ")")
                .collect(Collectors.joining(", "));

        // 如果没有非主键字段可更新，则至少更新一个字段为自身值
        if (updateClause.isEmpty()) {
            updateClause = "id = VALUES(id)";
        }

        sqlBuilder.append(updateClause);
        return sqlBuilder.toString();
    }

    /**
     * 判断是否同步完成
     */
    @Override
    public boolean isCompleted() {
        try {
            if (totalRecords == null || totalRecords <= 0) {
                String sql = "SELECT COUNT(*) FROM " + tableName;
                this.totalRecords = sourceDbUtils.queryForScalar(sql, Long.class);
            }

            return processedCount >= totalRecords;
        } catch (SQLException e) {
            throw new RuntimeException("Error checking completion", e);
        }
    }

    /**
     * 同步进度显示
     */
    private final long startTime = System.currentTimeMillis(); // 添加在类中

    @Override
    public String getProgress() {

        long currentProcessed = processedCount;
        try {
            if (totalRecords == null || totalRecords <= 0) {
                String sql = "SELECT COUNT(*) FROM " + tableName;
                this.totalRecords = sourceDbUtils.queryForScalar(sql, Long.class);
            }

            int progress = Math.min(100, (int) ((double) currentProcessed / totalRecords * 100));
            long elapsedSeconds = (System.currentTimeMillis() - startTime) / 1000;
            double avgSpeed = currentProcessed / (double) Math.max(1, elapsedSeconds);
            long remaining = (long) ((totalRecords - currentProcessed) / Math.max(1, avgSpeed));

            return String.format("同步 %d / %d | 进度: %d%% | 平均速度: %.1f 条/秒 | 剩余时间: %d 秒",
                    currentProcessed, totalRecords, progress, avgSpeed, remaining);
        } catch (Exception e) {
            log.warn("获取总数据量失败", e);
            return String.format("已同步 %d 条数据 (无法获取总数)", currentProcessed);
        }
    }

    /**
     * 自动建表逻辑
     */
    private void createTargetTableIfNotExists() {
        try {
            // 先检查目标表是否存在
            if (isTableExists(targetDataSource, tableName)) {
                log.info("Table {} already exists in target database. Skipping creation.", tableName);
                return;
            }

            // 如果不存在，再执行建表语句
            String createTableSql = getCreateTableSqlFromSource();
            targetDbUtils.update(createTableSql);
            log.info("Table {} created successfully in target database.", tableName);
        } catch (Exception e) {
            throw new RuntimeException("Error creating table in target", e);
        }
    }

    /**
     * 检查目标数据库中是否存在指定表
     */
    private boolean isTableExists(DataSource dataSource, String tableName) throws SQLException {
        String sql = "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = DATABASE() AND table_name = ?";
        Long count = targetDbUtils.queryForScalar(sql, Long.class, tableName);
        return count != null && count > 0;
    }

    /**
     * 获取源表的建表语句
     */
    private String getCreateTableSqlFromSource() {
        try (Connection conn = sourceDataSource.getConnection(); Statement stmt = conn.createStatement(); ResultSet rs = stmt.executeQuery("SHOW CREATE TABLE " + tableName)) {

            if (rs.next()) {
                return rs.getString("Create Table");
            }
            throw new RuntimeException("Table " + tableName + " not found");
        } catch (Exception e) {
            throw new RuntimeException("Error fetching create table sql", e);
        }
    }

    /**
     * 保存 offset 到本地文件（断点续传）
     */
    public void saveOffset() {
        try {
            Path path = Paths.get("offset_" + tableName + ".txt");
            String content = String.valueOf(offset);
            Files.write(path, content.getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING);
        } catch (IOException e) {
            log.warn("Error saving offset", e);
        }
    }

    /**
     * 从本地文件读取 offset（断点续传）
     */
    public Long loadOffset() {
        try {
            Path path = Paths.get("offset_" + tableName + ".txt");
            if (Files.exists(path)) {
                String lastIdStr = new String(Files.readAllBytes(path));
                return Long.parseLong(lastIdStr);
            }
            return 0L;
        } catch (Exception e) {
            return 0L;
        }
    }

    /**
     * 关闭资源
     */
    public void shutdown() {
        if (parallelWriteManager != null) {
            parallelWriteManager.shutdown();
        }
    }
}
