package com.my.db.dataCleaning;

import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import javax.sql.DataSource;
import java.io.*;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Data cleaning / update service.
 *
 * <p>Purpose: rewrite one column of one table according to a fixed set of suffix
 * replacement rules — currently the {@code name} column, e.g.
 * {@code REPLACEMENT_RULES.put("社区居民委员会", "社区")}.
 *
 * <p>Features: batched processing, JDBC batch updates, per-batch transactions with
 * rollback, a worker thread pool, connection pooling, a rollback-record file, and
 * an in-memory cache of the original values.
 */
public class DataCleaningAndUpdateFiled {
    private static final Logger logger = LoggerFactory.getLogger(DataCleaningAndUpdateFiled.class);

    // Database connection parameters.
    // NOTE(review): credentials are hard-coded; move them to external configuration
    // or a secrets store before this runs anywhere shared.
    private static final String DB_URL = "jdbc:mysql://xxxx/yilong?useUnicode=true&characterEncoding=utf8" +
            "&autoReconnect=true" +
            "&zeroDateTimeBehavior=CONVERT_TO_NULL" +
            "&transformedBitIsBoolean=true" +
            "&allowPublicKeyRetrieval=true" +
            "&serverTimezone=Asia/Shanghai";
    private static final String DB_USER = "root";
    private static final String DB_PASSWORD = "qxxx";

    // Processing configuration.
    private static final int BATCH_SIZE = 999;          // rows handled per batch/transaction
    private static final int THREAD_POOL_SIZE = 4;      // worker threads (== pool max size)
    private static final DataSource dataSource;         // shared Hikari connection pool

    private static final String TABLE_NAME = "sys_depart";
    private static final String table_filed = "depart_name";
    private static final String table_filter_sql = "org_category = '5'";

    // SQL built once from the constants above. Table/column names cannot be bound
    // as parameters; the row values themselves are always bound via '?'.
    private static final String SELECT_DATA_SQL = "SELECT id," + table_filed + " FROM " + TABLE_NAME + " WHERE " + table_filter_sql;
    private static final String UPDATE_SQL = "UPDATE " + TABLE_NAME + " SET " + table_filed + " =? WHERE id =?";
    private static final String ROLLBACK_UPDATE_SQL = "UPDATE " + TABLE_NAME + " SET " + table_filed + " =? WHERE id =?";

    // Suffix-replacement rules. The FIRST matching rule wins, so a LinkedHashMap is
    // used to make the iteration (and therefore rule priority) deterministic; a
    // plain HashMap would make the winner depend on hash order.
    private static final Map<String, String> REPLACEMENT_RULES = new LinkedHashMap<>();
    static {
        // Add more rules here as needed; earlier entries take priority.
        REPLACEMENT_RULES.put("社区居民委员会", "社区");
        REPLACEMENT_RULES.put("村民委员会", "村");
    }

    // Rollback file: one "id,oldValue" line per updated row.
    private static final String DATA_FILE_PATH = "F:\\临时\\processed_data.txt";

    static {
        HikariConfig config = new HikariConfig();
        config.setJdbcUrl(DB_URL);
        config.setUsername(DB_USER);
        config.setPassword(DB_PASSWORD);
        config.setMaximumPoolSize(THREAD_POOL_SIZE);
        config.setConnectionTimeout(30000);
        config.setIdleTimeout(600000);
        config.setMaxLifetime(1800000);
        config.setMinimumIdle(2);
        dataSource = new HikariDataSource(config);
    }

    public static void main(String[] args) {
        try {
            performDataCleaningAndUpdate();
//            rollbackFromFile();
        } catch (SQLException | InterruptedException | IOException e) {
            logger.error("数据处理或回滚失败", e);
        }
    }

    /**
     * Runs the full cleaning pass: loads every candidate (id, value) pair, splits
     * the ids into batches of {@link #BATCH_SIZE}, and lets a fixed thread pool
     * update each batch in its own transaction. Each updated row's original value
     * is appended to {@link #DATA_FILE_PATH} first so {@link #rollbackFromFile()}
     * can undo the run.
     *
     * @throws SQLException         if the initial SELECT fails
     * @throws InterruptedException if interrupted while waiting for the pool
     * @throws IOException          if the rollback file cannot be opened/closed
     */
    public static void performDataCleaningAndUpdate() throws SQLException, InterruptedException, IOException {
        Map<String, String> idToOldValue = getAllConversionIds();
        List<String> ids = new ArrayList<>(idToOldValue.keySet());
        int totalBatches = (int) Math.ceil((double) ids.size() / BATCH_SIZE);
        AtomicInteger totalProcessed = new AtomicInteger();
        ExecutorService executorService = Executors.newFixedThreadPool(THREAD_POOL_SIZE);

        // try-with-resources guarantees the rollback file is flushed and closed even
        // if batch submission fails; UTF-8 is fixed so the rollback pass reads the
        // same bytes regardless of platform default charset.
        try (BufferedWriter writer = new BufferedWriter(
                new OutputStreamWriter(new FileOutputStream(DATA_FILE_PATH), StandardCharsets.UTF_8))) {
            for (int i = 0; i < totalBatches; i++) {
                int startIndex = i * BATCH_SIZE;
                int endIndex = Math.min(startIndex + BATCH_SIZE, ids.size());
                List<String> batchIds = ids.subList(startIndex, endIndex);
                logger.info("线程池任务：批次 {}，处理数据范围：{} - {}", i, startIndex, endIndex);
                executorService.submit(() -> {
                    try {
                        if (batchIds.isEmpty()) return;
                        int processed = processBatch(batchIds, writer, idToOldValue);
                        totalProcessed.addAndGet(processed);
                        logger.info("本批次清洗数据量: {}", processed);
                    } catch (SQLException | IOException e) {
                        logger.error("批次处理失败：", e);
                        // Single retry after a short pause, e.g. for transient
                        // connection/deadlock errors.
                        try {
                            Thread.sleep(1000);
                            int processed = processBatch(batchIds, writer, idToOldValue);
                            totalProcessed.addAndGet(processed);
                            logger.info("重试成功，本批次清洗数据量: {}", processed);
                        } catch (InterruptedException ex) {
                            Thread.currentThread().interrupt(); // preserve interrupt status
                            logger.error("重试失败，本批次清洗数据量未更新", ex);
                        } catch (SQLException | IOException ex) {
                            logger.error("重试失败，本批次清洗数据量未更新", ex);
                        }
                    }
                });
            }
            executorService.shutdown();
            // Wait for all batches to finish BEFORE the writer is closed.
            executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        }
        logger.info("数据清理完成，总共清洗数据量: {}", totalProcessed);
    }

    /**
     * Loads every row matching the filter and caches id -> current column value.
     *
     * @return map from row id to the column's current value (may contain nulls)
     * @throws SQLException on query failure
     */
    private static Map<String, String> getAllConversionIds() throws SQLException {
        Map<String, String> idToOldValue = new HashMap<>();
        try (Connection conn = dataSource.getConnection();
             PreparedStatement ps = conn.prepareStatement(SELECT_DATA_SQL)) {
            try (ResultSet rs = ps.executeQuery()) {
                while (rs.next()) {
                    String id = rs.getString("id");
                    String oldValue = rs.getString(table_filed);
                    idToOldValue.put(id, oldValue);
                }
            }
        }
        return idToOldValue;
    }

    /**
     * Updates one batch of rows inside a single transaction, writing a rollback
     * record for each row *before* it is changed.
     *
     * @param batchPartIds ids in this batch (may be null/empty)
     * @param writer       shared rollback-file writer; appends are synchronized on
     *                     it because BufferedWriter is not thread-safe
     * @param idToOldValue cache of original values, read-only here
     * @return number of rows actually updated in the database
     * @throws SQLException on database failure (transaction is rolled back first)
     * @throws IOException  on rollback-file failure (transaction is rolled back first)
     */
    private static int processBatch(List<String> batchPartIds, BufferedWriter writer, Map<String, String> idToOldValue) throws SQLException, IOException {
        if (batchPartIds == null || batchPartIds.isEmpty()) {
            return 0;
        }
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(false);
            try {
                Map<String, String> newValues = getUpdateForIds(batchPartIds, idToOldValue);
                Map<String, String> updateMap = new HashMap<>();
                for (String id : batchPartIds) {
                    String oldValue = idToOldValue.get(id);
                    String newValue = newValues.get(id);
                    // Only touch rows whose value actually changes. The previous
                    // version updated (and wrote rollback records for) every row in
                    // the batch, including ones no rule matched.
                    if (newValue != null && !newValue.equals(oldValue)) {
                        updateMap.put(id, newValue);
                        synchronized (writer) {
                            writer.write(id + "," + oldValue);
                            writer.newLine();
                        }
                    }
                }
                int[] updateCounts = updateCodesInTable1(conn, updateMap);
                conn.commit();
                int processed = 0;
                for (int count : updateCounts) {
                    processed += count;
                }
                logger.info("实际处理数据量: {}", processed);
                return processed;
            } catch (SQLException | IOException e) {
                conn.rollback();
                throw e;
            }
        }
    }

    /**
     * Applies the replacement rules to each id's cached value.
     *
     * <p>Only the trailing occurrence of a rule key is replaced: the old code used
     * {@code String.replace}, which rewrites EVERY occurrence of the key inside the
     * name, not just the suffix the {@code endsWith} check matched.
     *
     * @param partIds      ids to compute new values for
     * @param idToOldValue cache of original values
     * @return id -> new value (equal to the old value when no rule matched; null
     *         when the old value was null)
     */
    private static Map<String, String> getUpdateForIds(List<String> partIds, Map<String, String> idToOldValue) {
        Map<String, String> codeMap = new HashMap<>();
        for (String partId : partIds) {
            String oldValue = idToOldValue.get(partId);
            String newValue = oldValue;
            if (oldValue != null) {
                for (Map.Entry<String, String> rule : REPLACEMENT_RULES.entrySet()) {
                    String suffix = rule.getKey();
                    if (oldValue.endsWith(suffix)) {
                        // Strip exactly the matched suffix and append the replacement.
                        newValue = oldValue.substring(0, oldValue.length() - suffix.length()) + rule.getValue();
                        break;
                    }
                }
            }
            codeMap.put(partId, newValue);
        }
        return codeMap;
    }

    /**
     * Executes the batched UPDATE for the given id -> new value map on the caller's
     * connection (the caller owns the transaction).
     *
     * @return per-statement update counts from {@link PreparedStatement#executeBatch()}
     * @throws SQLException on database failure
     */
    private static int[] updateCodesInTable1(Connection conn, Map<String, String> updateMap) throws SQLException {
        try (PreparedStatement ps = conn.prepareStatement(UPDATE_SQL)) {
            for (Map.Entry<String, String> entry : updateMap.entrySet()) {
                ps.setString(1, entry.getValue());
                ps.setString(2, entry.getKey());
                ps.addBatch();
            }
            return ps.executeBatch();
        }
    }

    /**
     * Restores every row recorded in the rollback file to its saved original value,
     * in batches on the same thread pool as the forward pass.
     *
     * @throws IOException          if the rollback file cannot be read
     * @throws SQLException         never thrown directly; declared for API stability
     * @throws InterruptedException if interrupted while waiting for the pool
     */
    public static void rollbackFromFile() throws IOException, SQLException, InterruptedException {
        List<String> allLines = new ArrayList<>();
        // try-with-resources + explicit UTF-8, matching how the file was written.
        try (BufferedReader reader = new BufferedReader(
                new InputStreamReader(new FileInputStream(DATA_FILE_PATH), StandardCharsets.UTF_8))) {
            String line;
            while ((line = reader.readLine()) != null) {
                if (!line.isEmpty()) {
                    allLines.add(line);
                }
            }
        }

        int totalBatches = (int) Math.ceil((double) allLines.size() / BATCH_SIZE);
        AtomicInteger totalRolledBack = new AtomicInteger();
        ExecutorService executorService = Executors.newFixedThreadPool(THREAD_POOL_SIZE);

        for (int i = 0; i < totalBatches; i++) {
            int startIndex = i * BATCH_SIZE;
            int endIndex = Math.min(startIndex + BATCH_SIZE, allLines.size());
            List<String> batch = allLines.subList(startIndex, endIndex);
            executorService.submit(() -> {
                try {
                    if (batch.isEmpty()) return;
                    int rolledBack = processRollbackBatch(batch);
                    totalRolledBack.addAndGet(rolledBack);
                    logger.info("本批次回滚数据量: {}", rolledBack);
                } catch (SQLException e) {
                    logger.error("批次回滚失败，正在尝试重试...", e);
                    try {
                        Thread.sleep(1000);
                        int rolledBack = processRollbackBatch(batch);
                        totalRolledBack.addAndGet(rolledBack);
                        logger.info("重试成功，本批次回滚数据量: {}", rolledBack);
                    } catch (InterruptedException ex) {
                        Thread.currentThread().interrupt(); // preserve interrupt status
                        logger.error("重试失败，批次回滚数据量未更新", ex);
                    } catch (SQLException ex) {
                        logger.error("重试失败，批次回滚数据量未更新", ex);
                    }
                }
            });
        }

        executorService.shutdown();
        executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
        logger.info("数据回滚完成，总共回滚数据量: {}", totalRolledBack);
    }

    /**
     * Restores one batch of "id,oldValue" lines inside a single transaction.
     *
     * @param batch rollback-file lines for this batch
     * @return number of rows restored
     * @throws SQLException on database failure (transaction is rolled back first)
     */
    private static int processRollbackBatch(List<String> batch) throws SQLException {
        try (Connection conn = dataSource.getConnection()) {
            conn.setAutoCommit(false);
            try (PreparedStatement ps = conn.prepareStatement(ROLLBACK_UPDATE_SQL)) {
                for (String line : batch) {
                    // Limit of 2: the id never contains a comma, but the saved value
                    // might — split(",") would truncate it and corrupt the rollback.
                    String[] parts = line.split(",", 2);
                    if (parts.length < 2) {
                        logger.warn("跳过格式不正确的回滚记录: {}", line);
                        continue;
                    }
                    ps.setString(1, parts[1]);
                    ps.setString(2, parts[0]);
                    ps.addBatch();
                }
                int[] updateCounts = ps.executeBatch();
                conn.commit();
                int rolledBack = 0;
                for (int count : updateCounts) {
                    rolledBack += count;
                }
                return rolledBack;
            } catch (SQLException e) {
                conn.rollback();
                throw e;
            }
        }
    }
}