package com.my.db.dataCleaning;

import java.io.*;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.*;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

import javax.sql.DataSource;

import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
 * 数据清洗与更新服务类
 * 功能：根据table1表中的part_id字段，从table2表中获取对应的code值并更新到table1表
 * 特性：分批次处理、批量更新、事务回滚、多线程处理、连接池管理、回滚文件记录、数据缓存
 *
 * 修改某个字段 根据另一张表的结果集
 */
public class DataCleaningAndUpdateCode {
    private static final Logger logger = LoggerFactory.getLogger(DataCleaningAndUpdateCode.class);

    // Database connection parameters.
    // NOTE: in MySQL Connector/J 8.x the zeroDateTimeBehavior enum value was
    // renamed from convertToNull (5.x) to CONVERT_TO_NULL.
    private static final String DB_URL = "jdbc:mysql://xxxx/yilong?useUnicode=true&characterEncoding=utf8" +
            "&autoReconnect=true" +
            "&zeroDateTimeBehavior=CONVERT_TO_NULL" + // 8.x enum name
            "&transformedBitIsBoolean=true" +
            "&allowPublicKeyRetrieval=true" +
            "&serverTimezone=Asia/Shanghai";
    // NOTE(review): credentials are hardcoded; move to configuration or
    // environment variables before running anywhere non-local.
    private static final String DB_USER = "root";
    private static final String DB_PASSWORD = "xxxx";

    // Processing configuration.
    private static final int BATCH_SIZE = 999;          // rows handled per batch/transaction
    private static final int THREAD_POOL_SIZE = 4;      // worker threads (and pool connections)
    private static final DataSource dataSource;         // HikariCP pool, built in the static initializer

    // Table/column names used to assemble the SQL below.
    private static final String id = "id";
    private static final String TABLE1_NAME = "sys_depart_main_body";
    // Column in table1 holding the foreign id used for the lookup.
    private static final String table1_thrid_id = "depart_id";
    // Column in table1 that receives the cleaned value.
    private static final String table1_code_name = "depart_code";
    private static final String TABLE2_NAME = "sys_depart";
    // Source column in table2 whose value is copied into table1.
    private static final String table2_code_name = "org_code";

    // Select the ids of all table1 rows whose target column is still NULL.
    private static final String SELECT_PART_IDS_SQL =
            "SELECT " + table1_thrid_id + " FROM " + TABLE1_NAME + " WHERE " + table1_code_name + " IS NULL";
    // Update one table1 row with the code resolved from the table2 cache.
    private static final String UPDATE_CODE_SQL =
            "UPDATE " + TABLE1_NAME + " SET " + table1_code_name + " =? WHERE " + table1_thrid_id + " =?";
    // Rollback: put the target column back to NULL (its pre-update state).
    private static final String ROLLBACK_UPDATE_SQL =
            "UPDATE " + TABLE1_NAME + " SET " + table1_code_name + " = NULL WHERE " + table1_thrid_id + " =?";

    // File recording every (part_id,code) pair written, so a run can be undone.
    private static final String DATA_FILE_PATH = "F:\\临时\\processed_data.txt";

    // In-memory snapshot of table2 (id -> code). Populated once in the static
    // initializer and only read afterwards; class initialization establishes a
    // happens-before edge, so unsynchronized reads from workers are safe.
    private static final Map<String, String> table2Cache = new HashMap<>();

    // Build the connection pool and warm the table2 cache.
    static {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl(DB_URL);
        config.setUsername(DB_USER);
        config.setPassword(DB_PASSWORD);
        config.setMaximumPoolSize(THREAD_POOL_SIZE);    // one connection per worker thread
        config.setConnectionTimeout(30000);             // ms
        config.setIdleTimeout(600000);                  // ms
        config.setMaxLifetime(1800000);                 // ms
        config.setMinimumIdle(2);
        dataSource = new HikariDataSource(config);

        // Cache all of table2 up front so worker threads never query it.
        try (Connection conn = dataSource.getConnection();
             PreparedStatement ps = conn.prepareStatement(
                     "SELECT " + id + ", " + table2_code_name + " FROM " + TABLE2_NAME);
             ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
                table2Cache.put(rs.getString(id), rs.getString(table2_code_name));
            }
        } catch (SQLException e) {
            // Deliberate best-effort: with an empty cache no row matches, so the
            // run updates nothing instead of crashing. The error is logged so
            // the operator can see why nothing happened.
            logger.error("读取 table2 数据失败", e);
        }
    }

    /**
     * Entry point: runs the cleaning pass.
     * Swap the comments to run {@link #rollbackFromFile()} instead and undo a
     * previous run from the rollback file.
     */
    public static void main(String[] args) {
        try {
            performDataCleaningAndUpdate();
//            rollbackFromFile();
        } catch (SQLException | InterruptedException | IOException e) {
            logger.error("数据处理或回滚失败", e);
        }
    }

    /**
     * Core driver: loads all ids needing cleaning, splits them into batches,
     * and processes the batches in parallel while recording every update to
     * the rollback file.
     *
     * @throws SQLException         on database failure while listing ids
     * @throws InterruptedException if interrupted while awaiting termination
     * @throws IOException          if the rollback file cannot be opened/closed
     */
    public static void performDataCleaningAndUpdate() throws SQLException, InterruptedException, IOException {
        ExecutorService executorService = Executors.newFixedThreadPool(THREAD_POOL_SIZE);
        List<String> partIds = getAllConversionIds();
        int totalBatches = (int) Math.ceil((double) partIds.size() / BATCH_SIZE);
        AtomicInteger totalProcessed = new AtomicInteger();

        // fix: try-with-resources — the original leaked the writer whenever an
        // exception fired before close(). UTF-8 is pinned so the rollback file
        // is readable regardless of the platform default charset.
        try (BufferedWriter writer = Files.newBufferedWriter(Paths.get(DATA_FILE_PATH), StandardCharsets.UTF_8)) {
            for (int i = 0; i < totalBatches; i++) {
                int startIndex = i * BATCH_SIZE;
                int endIndex = Math.min(startIndex + BATCH_SIZE, partIds.size());
                List<String> batchPartIds = partIds.subList(startIndex, endIndex);
                logger.info("线程池任务：批次 {}，处理数据范围：{} - {}", i, startIndex, endIndex);
                executorService.submit(() -> runBatchWithRetry(batchPartIds, writer, totalProcessed));
            }

            executorService.shutdown();
            // The writer must remain open until every worker is done with it.
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } finally {
            // fix: don't leak worker threads if id-loading or submission threw;
            // a no-op when the pool already terminated normally.
            executorService.shutdownNow();
        }
        logger.info("数据清理完成，总共清洗数据量: {}", totalProcessed.get());
    }

    /**
     * Runs one batch, retrying once after a short pause on SQL/IO failure.
     * Never throws: failures are logged so the other batches keep running.
     */
    private static void runBatchWithRetry(List<String> batchPartIds, BufferedWriter writer,
                                          AtomicInteger totalProcessed) {
        try {
            if (batchPartIds.isEmpty()) {
                return;
            }
            int processed = processBatch(batchPartIds, writer);
            totalProcessed.addAndGet(processed);
            logger.info("本批次清洗数据量: {}", processed);
        } catch (SQLException | IOException e) {
            logger.error("批次处理失败：", e);
            try {
                Thread.sleep(1000); // brief back-off before the single retry
                int processed = processBatch(batchPartIds, writer);
                totalProcessed.addAndGet(processed);
                logger.info("重试成功，本批次清洗数据量: {}", processed);
            } catch (InterruptedException ex) {
                // fix: restore the interrupt status instead of swallowing it
                Thread.currentThread().interrupt();
                logger.error("重试失败，本批次清洗数据量未更新", ex);
            } catch (SQLException | IOException ex) {
                logger.error("重试失败，本批次清洗数据量未更新", ex);
            }
        }
    }

    /**
     * Fetches all distinct ids in table1 that still need cleaning.
     * Uses a LinkedHashSet for de-duplication: the original List.contains()
     * check was O(n) per row (O(n^2) overall) while this preserves the same
     * first-seen order in O(n).
     *
     * @return distinct ids, in result-set order
     * @throws SQLException on database failure
     */
    private static List<String> getAllConversionIds() throws SQLException {
        Set<String> partIds = new LinkedHashSet<>();
        try (Connection conn = dataSource.getConnection();
             PreparedStatement ps = conn.prepareStatement(SELECT_PART_IDS_SQL);
             ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
                partIds.add(rs.getString(table1_thrid_id));
            }
        }
        return new ArrayList<>(partIds);
    }

    /**
     * Processes one batch inside a single transaction: resolves codes from the
     * cache, updates table1, and commits (rolling back on any failure).
     *
     * @param batchPartIds ids of this batch (may be null/empty; treated as 0)
     * @param writer       shared rollback-file writer
     * @return number of rows reported updated by executeBatch
     * @throws SQLException on database failure (transaction is rolled back)
     * @throws IOException  if the rollback file cannot be written
     */
    private static int processBatch(List<String> batchPartIds, BufferedWriter writer)
            throws SQLException, IOException {
        if (batchPartIds == null || batchPartIds.isEmpty()) {
            return 0;
        }
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(false);                  // start transaction
            try {
                Map<String, String> codeMap = getCodesForPartIds(batchPartIds);
                int[] updateCounts = updateCodesInTable1(conn, codeMap, writer);
                conn.commit();
                // NOTE(review): executeBatch may report SUCCESS_NO_INFO (-2) on
                // some drivers, which would skew this sum — confirm for the
                // deployed Connector/J version.
                int processed = Arrays.stream(updateCounts).sum();
                logger.info("实际处理数据量: {}", processed);
                return processed;
            } catch (SQLException | IOException e) {
                conn.rollback();                        // undo partial work
                throw e;
            } finally {
                // fix: restore the pool default before the connection is reused
                conn.setAutoCommit(true);
            }
        }
    }

    /**
     * Resolves codes for the given ids from the table2 cache.
     *
     * @param partIds ids to resolve
     * @return id -> code for every id present in the cache
     */
    private static Map<String, String> getCodesForPartIds(List<String> partIds) {
        Map<String, String> codeMap = new HashMap<>();
        for (String partId : partIds) {
            // containsKey (not a null check on get) so a cached NULL code is
            // still propagated, exactly as before.
            if (table2Cache.containsKey(partId)) {
                codeMap.put(partId, table2Cache.get(partId));
            }
        }
        return codeMap;
    }

    /**
     * Batch-updates table1 and appends each pair to the rollback file.
     * The file line format is {@code part_id,code}. Lines are written before
     * commit; if the transaction rolls back, re-applying those lines during a
     * file-based rollback is harmless (the rows were NULL to begin with).
     *
     * @param conn    connection with an open transaction
     * @param codeMap id -> code pairs to apply
     * @param writer  shared rollback-file writer (guarded by synchronized)
     * @return per-statement update counts from executeBatch
     * @throws SQLException on database failure
     * @throws IOException  if the rollback file cannot be written
     */
    private static int[] updateCodesInTable1(Connection conn, Map<String, String> codeMap,
                                             BufferedWriter writer) throws SQLException, IOException {
        try (PreparedStatement ps = conn.prepareStatement(UPDATE_CODE_SQL)) {
            for (Map.Entry<String, String> entry : codeMap.entrySet()) {
                ps.setString(1, entry.getValue());
                ps.setString(2, entry.getKey());
                ps.addBatch();
                // The writer is shared by all worker threads; serialize access.
                synchronized (writer) {
                    writer.write(entry.getKey() + "," + entry.getValue());
                    writer.newLine();
                }
            }
            return ps.executeBatch();
        }
    }

    /**
     * Undoes a previous run by reading the rollback file and setting the
     * target column back to NULL for every recorded id, in parallel batches.
     *
     * @throws IOException          if the rollback file cannot be read
     * @throws SQLException         declared for API compatibility
     * @throws InterruptedException if interrupted while awaiting termination
     */
    public static void rollbackFromFile() throws IOException, SQLException, InterruptedException {
        // fix: the original leaked its reader if readLine threw; readAllLines
        // closes internally, and the charset matches what the writer produced.
        List<String> allLines = Files.readAllLines(Paths.get(DATA_FILE_PATH), StandardCharsets.UTF_8);

        int totalBatches = (int) Math.ceil((double) allLines.size() / BATCH_SIZE);
        AtomicInteger totalRolledBack = new AtomicInteger();
        ExecutorService executorService = Executors.newFixedThreadPool(THREAD_POOL_SIZE);

        try {
            for (int i = 0; i < totalBatches; i++) {
                int startIndex = i * BATCH_SIZE;
                int endIndex = Math.min(startIndex + BATCH_SIZE, allLines.size());
                List<String> batch = allLines.subList(startIndex, endIndex);
                executorService.submit(() -> runRollbackBatchWithRetry(batch, totalRolledBack));
            }

            executorService.shutdown();
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        } finally {
            executorService.shutdownNow(); // no-op after normal termination
        }
        logger.info("数据回滚完成，总共回滚数据量: {}", totalRolledBack.get());
    }

    /**
     * Runs one rollback batch, retrying once after a short pause on failure.
     * Never throws: failures are logged so other batches keep running.
     */
    private static void runRollbackBatchWithRetry(List<String> batch, AtomicInteger totalRolledBack) {
        try {
            if (batch.isEmpty()) {
                return;
            }
            int rolledBack = processRollbackBatch(batch);
            totalRolledBack.addAndGet(rolledBack);
            logger.info("本批次回滚数据量: {}", rolledBack);
        } catch (SQLException e) {
            logger.error("批次回滚失败，正在尝试重试...", e);
            try {
                Thread.sleep(1000); // brief back-off before the single retry
                int rolledBack = processRollbackBatch(batch);
                totalRolledBack.addAndGet(rolledBack);
                logger.info("重试成功，本批次回滚数据量: {}", rolledBack);
            } catch (InterruptedException ex) {
                // fix: restore the interrupt status instead of swallowing it
                Thread.currentThread().interrupt();
                logger.error("重试失败，批次回滚数据量未更新", ex);
            } catch (SQLException ex) {
                logger.error("重试失败，批次回滚数据量未更新", ex);
            }
        }
    }

    /**
     * Applies one rollback batch inside a single transaction.
     *
     * @param batch rollback-file lines in {@code part_id,code} format
     * @return number of rows reported updated by executeBatch
     * @throws SQLException on database failure (transaction is rolled back)
     */
    private static int processRollbackBatch(List<String> batch) throws SQLException {
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(false);
            try (PreparedStatement ps = conn.prepareStatement(ROLLBACK_UPDATE_SQL)) {
                for (String line : batch) {
                    if (line.isEmpty()) {
                        continue; // fix: skip blank lines (e.g. trailing newline)
                    }
                    // Only the id before the first comma matters here.
                    String partId = line.split(",", 2)[0];
                    ps.setString(1, partId);
                    ps.addBatch();
                }
                int[] updateCounts = ps.executeBatch();
                conn.commit();
                return Arrays.stream(updateCounts).sum();
            } catch (SQLException e) {
                conn.rollback();
                throw e;
            } finally {
                // fix: restore the pool default before the connection is reused
                conn.setAutoCommit(true);
            }
        }
    }
}