package com.example.demo.demos.service.impl;

import com.fasterxml.jackson.core.JsonFactory;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.core.JsonToken;
import com.fasterxml.jackson.databind.DeserializationFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
import org.apache.commons.compress.compressors.gzip.GzipCompressorInputStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;
import java.io.*;
import java.math.BigDecimal;
import java.nio.charset.StandardCharsets;
import java.nio.file.*;
import java.sql.*;
import java.sql.Date;
import java.util.*;
import java.util.concurrent.atomic.AtomicLong;
import java.util.stream.Collectors;
import java.util.stream.Stream;

/**
 * Sync service that loads table data from local gzip dump files (one directory
 * per table, one sub-directory per YYYYMMDD date) and upserts it into the
 * target database. (Original note: 走本地数据插入 — "insert via local data".)
 */
@Service
public class LocalSyncService {
    private static final Logger logger = LoggerFactory.getLogger(LocalSyncService.class);
    // Primary-key column that every target table is required to have.
    private static final String DEFAULT_PK_FIELD = "id";
    // Retry tuning. NOTE(review): MAX_RETRIES appears unused in this file;
    // batch retries use BATCH_RETRY_COUNT instead — confirm before removing.
    private static final int MAX_RETRIES = 3;
    private static final long RETRY_DELAY_MS = 1000;
    private static final int BATCH_RETRY_COUNT = 2;

    // Configuration parameters
    @Value("${local.data.path}")
    private String localDataPath; // local data root directory

    @Value("${db.url}")
    private String dbUrl;
    @Value("${db.username}")
    private String dbUsername;
    @Value("${db.password}")
    private String dbPassword;
    @Value("${batch.size:500}")
    private int batchSize;
    @Value("${isSynchronousAll}")
    private int isSynchronousAll;

    // Jackson mapper (thread-safe); unknown JSON properties are ignored.
    private final ObjectMapper objectMapper = new ObjectMapper()
            .disable(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES);

    // State management.
    // tableSchemaCache: table name -> (column name -> java.sql.Types code).
    // NOTE(review): plain HashMap — safe only while the sync runs single-threaded.
    private final Map<String, Map<String, Integer>> tableSchemaCache = new HashMap<>();
    // Incremented by 2 after each full directory pass and compared against
    // isSynchronousAll to decide full vs. latest-date sync.
    // NOTE(review): static boxed Integer mutated from instance code; not thread-safe.
    private static Integer countNumber = 0;
    private volatile boolean isRunning = false;

    // NOTE(review): despite its name, this array is consumed as an ALLOW-list in
    // processTableDirectory — only tables listed here are synced; everything else
    // is skipped. Confirm intent and consider renaming.
    private static final String[] EXCLUDED_TABLES = {
            "annual_report_asset",
            "annual_report_invest",
            "bankruptcy_public_announcement",
            "company_base",
            "company_business_import_export_credit",
            "company_chattel_mortgage",
            "company_employee",
            "company_industry",
            "company_justice",
            "company_justice_unfreeze",
            "company_partner_pay",
            "company_punishment_info_creditchina_new",
            "company_simple_cancel_objection",
            "ip_copyright_works",
            "ip_trademark_applicant_list",
            "opt_ent_partner_cn",
            "qimingpian_product",
            "risk_court_filed_case_litigant",
            "risk_environment_punish",
            "risk_restricted_outbound",
            "risk_zhongben",
            "special_hongkong_company",
            "stock_allotment",
            "stock_equity_structure",
            "stock_issue",
            "tb_company_sme",
            "tb_project_jingpin",



            "annual_report_base",
            "annual_report_partner",
            "bankruptcy_public_cases",
            "company_base_clean",
            "company_cancel_notice",
            "company_chattel_pawn",
            "company_employment",
            "company_industry_model",
            "company_justice_equity_change",
            "company_license_info_creditchina_new",
            "company_partner_realpay",
            "company_qy_partner",
            "company_stock_change",
            "ip_copyright_works_list",
            "ip_trademark_category_list",
            "opt_ip_patent_affair_status",
            "qimingpian_team_member",
            "risk_court_notice",
            "risk_evaluate_org",
            "risk_shixin",
            "sf_cpws",
            "special_hongkong_company_history",
            "stock_announcement",
            "stock_executive",
            "sys_cat",
            "tb_general_taxpayer",
            "tm_class_dict",




            "annual_report_change",
            "annual_report_social_security",
            "bankruptcy_public_cases_list",
            "company_bid_entity_list_new",
            "company_change",
            "company_check",
            "company_history_name",
            "company_intellectual",
            "company_justice_freeze",
            "company_logo",
            "company_pledge",
            "company_random_check",
            "investment_event",
            "ip_patent",
            "ip_trademark_flow_list",
            "opt_ip_patent_info",
            "risk_court_announcement",
            "risk_court_notice_list",
            "risk_evaluate_org_list",
            "risk_tax_punish",
            "sf_cpws_dsr",
            "special_law_office",
            "stock_base",
            "stock_financial_analysis",
            "tax_credit",
            "tb_icp_base_info",
            "tradable_stock_holder",



            "annual_report_equity_change",
            "annual_report_website",
            "company_abnormal",
            "company_bid_main",
            "company_chattel",
            "company_clear",
            "company_illegal",
            "company_intellectual_change",
            "company_justice_invalid",
            "company_own_tax_info",
            "company_profile",
            "company_random_check_result",
            "ip_copyright_software",
            "ip_patent_applicant_list",
            "land_publicity",
            "opt_ip_patent_patentee",
            "risk_court_announcement_list",
            "risk_court_service_announcement",
            "risk_evaluate_result",
            "risk_xianxiao",
            "special_enterprise",
            "special_social_organ",
            "stock_bonus",
            "stock_holder",
            "tb_certificate_detail_integrate",
            "tb_judicial_sale",



            "annual_report_guarantee",
            "bankruptcy_judicative_paper",
            "company_allow",
            "company_branch",
            "company_chattel_change",
            "company_customs_import_export_credit_rating",
            "company_import_export_administr_penalty",
            "company_investor",
            "company_justice_keep_freeze",
            "company_partner",
            "company_punish",
            "company_simple_cancel",
            "ip_copyright_software_list",
            "ip_trademark",
            "land_result_announcement",
            "qimingpian_history_rongzi",
            "risk_court_filed_case",
            "risk_court_service_announcement_litigant",
            "risk_evaluate_result_list",
            "risk_zhixing",
            "special_gov_unit",
            "special_trade_union",
            "stock_equity_change",
            "stock_holding",
            "tb_certificate_integrate",
            "tb_judicial_sale_info_company",
    };

    // Database connection pool.
    // NOTE(review): static field assigned per Spring bean instance in initDataSource;
    // safe only because the service is a singleton.
    private static HikariDataSource dataSource;

    @PostConstruct
    public void initDataSource() {
        // Build the Hikari pool from the injected JDBC settings.
        HikariConfig cfg = new HikariConfig();
        cfg.setJdbcUrl(dbUrl);
        cfg.setUsername(dbUsername);
        cfg.setPassword(dbPassword);
        cfg.setMaximumPoolSize(5);          // sync job is effectively single-threaded
        cfg.setConnectionTimeout(30000);    // 30s
        cfg.setIdleTimeout(600000);         // 10min
        cfg.setMaxLifetime(1800000);        // 30min
        cfg.setAutoCommit(false);           // commits happen explicitly per file
        cfg.setConnectionTestQuery("SELECT 1");

        dataSource = new HikariDataSource(cfg);
        logger.info("数据库连接池初始化完成");
    }

    @PostConstruct
    public void init() throws IOException {
        // Ensure the configured local data root exists and is readable/writable
        // before any sync runs; fails fast on misconfiguration.
        validateAndCreateDirectory(localDataPath);
        logger.info("本地文件同步服务初始化完成");
    }

    /**
     * Creates the directory if it is missing, then verifies read and write access.
     *
     * @param path directory path to validate
     * @throws IOException if the directory cannot be created, read, or written
     */
    private void validateAndCreateDirectory(String path) throws IOException {
        Path dir = Paths.get(path);
        if (!Files.exists(dir)) {
            Files.createDirectories(dir);
            logger.info("已创建目录: {}", dir);
        }
        if (!Files.isReadable(dir)) {
            throw new IOException("目录不可读: " + dir);
        }
        if (!Files.isWritable(dir)) {
            throw new IOException("目录不可写: " + dir);
        }
    }

    @PreDestroy
    public void cleanup() {
        // Release pooled connections on shutdown; safe when initDataSource never ran.
        HikariDataSource ds = dataSource;
        if (ds != null) {
            ds.close();
            logger.info("数据库连接池已关闭");
        }
    }

    /**
     * Entry point for a full sync pass: walks every table directory under the
     * local data root on a single pooled connection. All failures are logged,
     * never propagated to the caller (e.g. a scheduler).
     */
    public void syncAllTables() {
        // NOTE(review): check-then-set on isRunning is not atomic; two concurrent
        // callers could both pass. Fine for a single-threaded scheduler — confirm.
        if (isRunning) {
            logger.warn("上一次同步任务仍在执行，跳过本次执行");
            return;
        }
        isRunning = true;
        // try-with-resources guarantees the connection is returned to the pool
        // (the original manual close in finally could be skipped on some paths).
        try (Connection conn = getConnection()) {
            processLocalDirectories(conn);
        } catch (Exception e) {
            logger.error("同步任务失败", e);
        } finally {
            isRunning = false;
        }
    }

    /**
     * Processes every table sub-directory of the local data root.
     * A failure in one table is logged and does not stop the others.
     */
    private void processLocalDirectories(Connection conn) throws Exception {
        for (String tableDir : listLocalDirectories(localDataPath)) {
            try {
                processTableDirectory(conn, tableDir);
            } catch (Exception e) {
                logger.error("处理表目录 {} 失败，跳过继续处理其他表: {}", tableDir, e.getMessage(), e);
            }
        }
        // Bumped once per full pass; compared against isSynchronousAll to pick
        // full vs. latest-date sync mode in processTableDirectory.
        countNumber += 2;
    }

    /**
     * Returns the names of the immediate sub-directories of {@code path}
     * (non-recursive, unspecified order).
     */
    private List<String> listLocalDirectories(String path) throws IOException {
        List<String> names = new ArrayList<>();
        try (DirectoryStream<Path> entries = Files.newDirectoryStream(Paths.get(path))) {
            for (Path entry : entries) {
                if (Files.isDirectory(entry)) {
                    names.add(entry.getFileName().toString());
                }
            }
        }
        return names;
    }

    /**
     * Syncs one table directory. In full-sync mode (isSynchronousAll == countNumber)
     * all date sub-directories are processed — a few tables only from a fixed cutoff
     * date onwards; otherwise only the latest date sub-directory is processed.
     */
    private void processTableDirectory(Connection conn, String tableDir) throws Exception {
        // NOTE(review): despite its name, EXCLUDED_TABLES acts as an allow-list —
        // only tables present in it are synced; everything else is skipped.
        if (!Arrays.asList(EXCLUDED_TABLES).contains(tableDir)) {
            logger.info("跳过 {} 表的同步", tableDir);
            return;
        }
        String tableName = shouldAppendYbSuffix(tableDir) ? tableDir + "_yb" : tableDir;
        logger.info("正在处理表目录: {}", tableDir);

        loadTableSchema(conn, tableName);

        Path tablePath = Paths.get(localDataPath, tableDir);
        if (isSynchronousAll == countNumber) {
            // Tables that only need data from a fixed date onwards during a full sync.
            Map<String, String> cutoffDates = new HashMap<>();
            cutoffDates.put("company_profile", "20241031");
            cutoffDates.put("company_allow", "20250418");
            cutoffDates.put("opt_ent_partner_cn", "20250304");
            cutoffDates.put("risk_court_notice", "20250413");

            final String cutoff = cutoffDates.get(tableDir);
            if (cutoff != null) {
                logger.info("特殊处理: {}表同步{}及以后的数据", tableDir, cutoff);
                processDateDirectories(tablePath, tableName, tableDir, dir -> dir.compareTo(cutoff) >= 0, conn);
            } else {
                logger.info("同步所有数据");
                processDateDirectories(tablePath, tableName, tableDir, dir -> true, conn);
            }
        } else {
            logger.info("同步最新日期数据");
            processLatestDateDirectory(tablePath, tableName, tableDir, conn);
        }
    }

    /**
     * Processes every YYYYMMDD sub-directory that passes {@code filter},
     * in ascending (chronological) order.
     */
    private void processDateDirectories(Path tablePath, String tableName, String tableDir,
                                        java.util.function.Predicate<String> filter, Connection conn) throws Exception {
        List<String> dates;
        try (Stream<Path> children = Files.list(tablePath)) {
            dates = children
                    .filter(Files::isDirectory)
                    .map(p -> p.getFileName().toString())
                    .filter(filter)
                    .sorted()
                    .collect(Collectors.toList());
        }
        for (String date : dates) {
            processDateDirectory(tableName, date, tableDir, conn);
        }
    }

    /**
     * Processes only the lexicographically greatest (i.e. newest YYYYMMDD)
     * sub-directory; warns when the table directory has no date sub-directories.
     */
    private void processLatestDateDirectory(Path tablePath, String tableName, String tableDir, Connection conn) throws Exception {
        String latest;
        try (Stream<Path> children = Files.list(tablePath)) {
            latest = children
                    .filter(Files::isDirectory)
                    .map(p -> p.getFileName().toString())
                    .max(Comparator.naturalOrder())
                    .orElse(null);
        }
        if (latest == null) {
            logger.warn("表目录 {} 中没有找到日期子目录", tableDir);
        } else {
            processDateDirectory(tableName, latest, tableDir, conn);
        }
    }

    /** Source directories whose target table name carries a "_yb" suffix. */
    private static final Set<String> YB_SUFFIX_TABLES = new HashSet<>(Arrays.asList(
            "company_change",
            "company_industry",
            "company_history_name",
            "company_partner_realpay",
            "company_partner",
            "company_partner_pay",
            "tb_company_sme"
    ));

    /**
     * True when {@code tableName} writes to a "_yb"-suffixed target table.
     * The lookup set is built once (the original rebuilt a HashSet on every call).
     */
    private boolean shouldAppendYbSuffix(String tableName) {
        return YB_SUFFIX_TABLES.contains(tableName);
    }

    /**
     * Loads and caches the column->java.sql.Types mapping for a table.
     * Loaded once per table for the lifetime of the service.
     *
     * @throws SQLException if the table lacks the mandatory primary-key column
     */
    private void loadTableSchema(Connection conn, String tableName) throws SQLException {
        if (tableSchemaCache.containsKey(tableName)) {
            return;
        }
        Map<String, Integer> columns = new HashMap<>();
        try (ResultSet rs = conn.getMetaData().getColumns(null, null, tableName, null)) {
            while (rs.next()) {
                columns.put(rs.getString("COLUMN_NAME"), rs.getInt("DATA_TYPE"));
            }
        }
        if (!columns.containsKey(DEFAULT_PK_FIELD)) {
            throw new SQLException("缺少主键字段: " + DEFAULT_PK_FIELD);
        }
        tableSchemaCache.put(tableName, columns);
        logger.info("已加载表结构: {} ({}列)", tableName, columns.size());
    }

    /**
     * Processes all *.gz files in one date sub-directory. A failed file is
     * logged and skipped; the remaining files are still processed.
     */
    private void processDateDirectory(String tableName, String dateDir, String tableDir, Connection conn) throws Exception {
        logger.debug("开始处理日期目录: {}/{}", tableDir, dateDir);
        Path datePath = Paths.get(localDataPath, tableDir, dateDir);

        List<Path> gzFiles;
        try (Stream<Path> entries = Files.list(datePath)) {
            gzFiles = entries
                    .filter(Files::isRegularFile)
                    .filter(p -> p.toString().endsWith(".gz"))
                    .collect(Collectors.toList());
        }

        for (Path gz : gzFiles) {
            try {
                processGzFile(tableName, gz.toString(), conn);
            } catch (Exception e) {
                logger.error("处理文件 {} 失败，跳过继续处理其他文件: {}", gz, e.getMessage(), e);
            }
        }
        logger.debug("完成处理日期目录: {}/{}", tableDir, dateDir);
    }

    /**
     * Processes one gzip file inside a single transaction: the whole file is
     * committed on success and rolled back on any failure, then rethrown so the
     * caller can log and skip it. (Original body had misleading extra indentation.)
     */
    private void processGzFile(String tableName, String filePath, Connection conn) throws Exception {
        long startTime = System.currentTimeMillis();
        File localFile = new File(filePath);

        // Defensive: the pool is configured with auto-commit off already.
        conn.setAutoCommit(false);
        try {
            logger.info("开始处理文件: {}, 大小: {}MB", filePath,
                    Files.size(localFile.toPath()) / (1024 * 1024));

            processJsonStream(conn, tableName, localFile);
            conn.commit();
            long parseTime = System.currentTimeMillis() - startTime;
            logger.info("文件处理完成: {} (总耗时: {}ms)", filePath, parseTime);
        } catch (Exception e) {
            conn.rollback();
            logger.error("文件处理失败（已回滚）: {}", filePath, e);
            throw e;
        }
    }

    // Streaming JSON parse core (keeps memory low: one line-delimited JSON record
    // at a time, flushed to the database in batches of batchSize).
    private void processJsonStream(Connection conn, String tableName, File localFile) throws Exception {
        Map<String, Integer> schema = tableSchemaCache.get(tableName);
        if (schema == null) {
            throw new SQLException("未找到表结构: " + tableName);
        }

        List<Map<String, Object>> batchRecords = new ArrayList<>(batchSize);
        JsonFactory factory = objectMapper.getFactory();

        // File layout: gzip-compressed, UTF-8, one JSON object per line (NDJSON).
        try (InputStream fis = new FileInputStream(localFile);
             InputStream gzis = new GzipCompressorInputStream(fis);
             BufferedReader reader = new BufferedReader(new InputStreamReader(gzis, StandardCharsets.UTF_8))) {

            int recordCount = 0;
            String line;
            while ((line = reader.readLine()) != null) {
                try {
                    // Parse the single-line JSON object with Jackson streaming.
                    try (JsonParser parser = factory.createParser(line)) {
                        JsonToken token = parser.nextToken();
                        if (token != JsonToken.START_OBJECT) {
                            logger.warn("跳过非JSON对象行: {}", line);
                            continue;
                        }

                        Map<String, Object> record = parseJsonObject(parser, schema);
                        // Records without the primary key are silently dropped.
                        if (record.containsKey(DEFAULT_PK_FIELD)) {
                            recordCount++;
                            Map<String, Object> sanitizedRecord = sanitizeRecord(record);
                            batchRecords.add(sanitizedRecord);

                            if (batchRecords.size() >= batchSize) {
                                executeUpsertWithRetry(conn, tableName, batchRecords, BATCH_RETRY_COUNT);
                                batchRecords.clear();
                            }
                        }
                    }
                } catch (Exception e) {
                    // A malformed line is logged and skipped; the file keeps processing.
                    logger.warn("解析行失败: {} | 错误: {}", line, e.getMessage());
                }
            }

            // Flush the remaining partial batch.
            if (!batchRecords.isEmpty()) {
                executeUpsertWithRetry(conn, tableName, batchRecords, BATCH_RETRY_COUNT);
                batchRecords.clear();
            }

            logger.info("解析完成: {}条记录", recordCount);
        }
    }

    /**
     * Reads one JSON object from the parser (positioned just after START_OBJECT)
     * into a field->value map, keeping only fields present in the table schema.
     * Array values are stored as their JSON string form; scalar values are
     * converted to the column's SQL type by convertJsonValue.
     */
    private Map<String, Object> parseJsonObject(JsonParser parser, Map<String, Integer> schema) throws IOException {
        Map<String, Object> record = new HashMap<>();

        while (parser.nextToken() != JsonToken.END_OBJECT) {
            String field = parser.currentName();
            JsonToken token = parser.nextToken(); // advance from field name to its value

            if (schema.containsKey(field)) {
                if (token == JsonToken.START_ARRAY) {
                    // Serialize the whole array back to a JSON string.
                    record.put(field, parser.readValueAsTree().toString());
                } else {
                    // NOTE(review): a nested OBJECT value for a schema column falls
                    // through to convertJsonValue, whose default getValueAsString
                    // yields null for object tokens — confirm columns never hold objects.
                    record.put(field, convertJsonValue(parser, schema.get(field)));
                }
            } else {
                skipField(parser, token);
            }
        }
        return record;
    }

    /**
     * Skips a field value that is not part of the table schema.
     * Only container values need explicit skipping; scalar values are consumed
     * by the caller's next nextToken() call.
     */
    private void skipField(JsonParser parser, JsonToken token) throws IOException {
        boolean isContainer = token == JsonToken.START_OBJECT || token == JsonToken.START_ARRAY;
        if (isContainer) {
            parser.skipChildren();
        }
    }

    /**
     * Converts the parser's current scalar value to a Java object matching the
     * column's java.sql.Types code. String-encoded numbers/booleans are parsed;
     * any conversion failure is logged and yields null (the row still inserts).
     */
    private Object convertJsonValue(JsonParser parser, int sqlType) throws IOException {
        if (parser.currentToken() == JsonToken.VALUE_NULL) {
            return null;
        }

        try {
            switch (sqlType) {
                case Types.SMALLINT:
                case Types.INTEGER:
                    if (parser.currentToken() == JsonToken.VALUE_STRING) {
                        return Integer.parseInt(parser.getText());
                    }
                    return parser.getIntValue();
                case Types.BIGINT:
                    if (parser.currentToken() == JsonToken.VALUE_STRING) {
                        return Long.parseLong(parser.getText());
                    }
                    return parser.getLongValue();
                case Types.FLOAT:
                case Types.DOUBLE:
                    if (parser.currentToken() == JsonToken.VALUE_STRING) {
                        return Double.parseDouble(parser.getText());
                    }
                    return parser.getDoubleValue();
                case Types.DECIMAL:
                case Types.NUMERIC:
                    if (parser.currentToken() == JsonToken.VALUE_STRING) {
                        return new BigDecimal(parser.getText());
                    } else if (parser.currentToken().isNumeric()) {
                        return parser.getDecimalValue();
                    }
                    // Fallback for other token types (e.g. booleans) — may throw,
                    // which is caught below and converted to null.
                    return new BigDecimal(parser.getValueAsString());
                case Types.BOOLEAN:
                    if (parser.currentToken() == JsonToken.VALUE_STRING) {
                        return Boolean.parseBoolean(parser.getText());
                    }
                    return parser.getBooleanValue();
                case Types.DATE:
                    return parseDate(parser.getValueAsString());
                case Types.TIMESTAMP:
                    return parseTimestamp(parser.getValueAsString());
                default:
                    // Everything else (VARCHAR/TEXT/...) is stored as its string form.
                    return parser.getValueAsString();
            }
        } catch (Exception e) {
            // Bad value -> null column; the record itself is not rejected.
            logger.warn("值转换失败: {} (期望类型: {})", parser.getValueAsString(), sqlTypeName(sqlType), e);
            return null;
        }
    }

    /**
     * Human-readable name for a java.sql.Types code, used only in warning logs.
     * Now covers exactly the types convertJsonValue distinguishes (SMALLINT and
     * FLOAT were previously reported as "STRING"); everything else maps to "STRING".
     */
    private String sqlTypeName(int sqlType) {
        switch (sqlType) {
            case Types.SMALLINT: return "SMALLINT";
            case Types.INTEGER: return "INTEGER";
            case Types.BIGINT: return "BIGINT";
            case Types.FLOAT: return "FLOAT";
            case Types.DOUBLE: return "DOUBLE";
            case Types.DECIMAL: return "DECIMAL";
            case Types.NUMERIC: return "NUMERIC";
            case Types.BOOLEAN: return "BOOLEAN";
            case Types.DATE: return "DATE";
            case Types.TIMESTAMP: return "TIMESTAMP";
            default: return "STRING";
        }
    }

    /**
     * Parses a date string into java.sql.Date, keeping only the "yyyy-MM-dd"
     * portion. Accepts a bare date, ISO "T"-separated datetimes, and now also
     * space-separated datetimes ("yyyy-MM-dd HH:mm:ss"), which previously
     * failed and fell back to null. Returns null (and warns) on malformed input.
     */
    private Date parseDate(String value) {
        if (value == null) return null;
        try {
            // Split on either the ISO 'T' separator or a space before the time part.
            return Date.valueOf(value.split("[T ]")[0]);
        } catch (Exception e) {
            logger.warn("日期格式解析失败: {}", value);
            return null;
        }
    }

    /**
     * Parses a timestamp string into java.sql.Timestamp. Accepts
     * "yyyy-MM-dd HH:mm:ss[.f...]" or the ISO variant with a 'T' separator.
     * Returns null (and warns) on malformed input.
     */
    private Timestamp parseTimestamp(String value) {
        if (value == null) {
            return null;
        }
        try {
            String normalized = value.replace("T", " ");
            return Timestamp.valueOf(normalized);
        } catch (Exception e) {
            logger.warn("时间戳格式解析失败: {}", value);
            return null;
        }
    }

    /**
     * Returns a copy of the record with every String value passed through
     * sanitizeString; non-string values are copied unchanged.
     */
    private Map<String, Object> sanitizeRecord(Map<String, Object> record) {
        Map<String, Object> cleaned = new HashMap<>(record.size());
        for (Map.Entry<String, Object> entry : record.entrySet()) {
            Object value = entry.getValue();
            if (value instanceof String) {
                cleaned.put(entry.getKey(), sanitizeString((String) value));
            } else {
                cleaned.put(entry.getKey(), value);
            }
        }
        return cleaned;
    }

    /**
     * Replaces control characters (0x00–0x1F and 0x7F) with '?' and trims the
     * result. Allocates a copy only when the input actually contains a control
     * character; otherwise the original (trimmed) string is returned.
     */
    private String sanitizeString(String input) {
        if (input == null) {
            return null;
        }

        char[] buf = null; // lazily created only when cleaning is needed
        for (int i = 0; i < input.length(); i++) {
            char c = input.charAt(i);
            if (c <= 31 || c == 127) {
                if (buf == null) {
                    buf = input.toCharArray();
                }
                buf[i] = '?';
            }
        }

        String result = (buf == null) ? input : new String(buf);
        return result.trim();
    }

    /**
     * Runs executeUpsert with up to {@code maxRetries} retries and linear
     * backoff (RETRY_DELAY_MS * attempt — the delay was previously a hard-coded
     * 1000 that ignored the constant). Rethrows the last SQLException once the
     * retry budget is exhausted; converts interruption into SQLException.
     */
    private void executeUpsertWithRetry(Connection conn, String tableName, List<Map<String, Object>> records, int maxRetries) throws SQLException {
        if (records.isEmpty()) return;

        int attempt = 0;
        while (attempt <= maxRetries) {
            try {
                executeUpsert(conn, tableName, records);
                return;
            } catch (SQLException e) {
                attempt++;
                if (attempt > maxRetries) {
                    logger.error("表 {} 数据插入失败，已达最大重试次数: {}", tableName, e.getMessage(), e);
                    throw e;
                }
                logger.warn("表 {} 数据插入失败，第{}/{}次重试: {}", tableName, attempt, maxRetries, e.getMessage());
                try {
                    Thread.sleep(RETRY_DELAY_MS * attempt); // linear backoff
                } catch (InterruptedException ie) {
                    Thread.currentThread().interrupt();
                    throw new SQLException("操作被中断", ie);
                }
            }
        }
    }

    /**
     * Upserts a list of records in JDBC batches.
     * NOTE(review): the column list is taken from the FIRST record's key set —
     * later records missing a key bind null, and keys absent from the first
     * record are silently dropped. Confirm all records in a batch share keys.
     */
    private void executeUpsert(Connection conn, String tableName, List<Map<String, Object>> records) throws SQLException {
        if (records.isEmpty()) return;

        String[] fields = records.get(0).keySet().toArray(new String[0]);
        String sql = buildUpsertSql(conn, tableName, fields);

        try (PreparedStatement stmt = conn.prepareStatement(sql)) {
            int totalProcessed = 0;
            for (Map<String, Object> record : records) {
                try {
                    for (int i = 0; i < fields.length; i++) {
                        setStatementParameter(stmt, i + 1, record.get(fields[i]),
                                tableSchemaCache.get(tableName).get(fields[i]));
                    }
                    stmt.addBatch();
                    totalProcessed++;

                    // Execute in sub-batches to avoid oversized transactions.
                    if (totalProcessed % batchSize == 0) {
                        executeBatch(stmt, tableName, totalProcessed);
                    }
                } catch (SQLException e) {
                    // A single bad record is skipped; the rest of the batch continues.
                    logger.warn("表 {} 记录处理失败，跳过此记录: {}", tableName, e.getMessage());
                }
            }

            // Flush the remaining partial sub-batch.
            if (totalProcessed % batchSize != 0) {
                executeBatch(stmt, tableName, totalProcessed);
            }
        }
    }

    /**
     * Executes the statement's pending batch, logs the success count, and always
     * clears the batch afterwards. Partial failures are delegated to
     * handleBatchUpdateException.
     */
    private void executeBatch(PreparedStatement stmt, String tableName, int totalCount) throws SQLException {
        try {
            int successCount = 0;
            for (int rows : stmt.executeBatch()) {
                boolean ok = rows >= 0 || rows == Statement.SUCCESS_NO_INFO;
                if (ok) {
                    successCount++;
                }
            }
            logger.info("表 {} 批处理结果: 成功={}/总数={}", tableName, successCount, totalCount);
        } catch (BatchUpdateException e) {
            handleBatchUpdateException(e, tableName);
        } finally {
            stmt.clearBatch();
        }
    }

    /**
     * Logs per-statement failure counts and the driver's chained error details.
     * NOTE(review): the exception is logged but NOT rethrown — partial batch
     * failures are treated as non-fatal here; confirm that is intentional.
     */
    private void handleBatchUpdateException(BatchUpdateException e, String tableName) throws SQLException {
        int[] counts = e.getUpdateCounts();

        int failedCount = 0;
        for (int count : counts) {
            if (count == Statement.EXECUTE_FAILED) {
                failedCount++;
            }
        }

        logger.warn("表 {} 批量插入部分失败: 总条数={}, 失败数={}",
                tableName, counts.length, failedCount);

        for (SQLException next = e.getNextException(); next != null; next = next.getNextException()) {
            logger.warn("插入错误详情: {}", next.getMessage());
        }
    }

    /**
     * Picks the upsert dialect: PostgreSQL gets native ON CONFLICT, everything
     * else gets a standard MERGE.
     */
    private String buildUpsertSql(Connection conn, String tableName, String[] fields) throws SQLException {
        return isPostgreSQL(conn)
                ? buildPostgresUpsertSql(tableName, fields)
                : buildGenericUpsertSql(tableName, fields);
    }

    /**
     * Builds "INSERT ... ON CONFLICT (id) DO UPDATE SET f=EXCLUDED.f, ..." for
     * PostgreSQL. Binds exactly fields.length parameters, in field order.
     */
    private String buildPostgresUpsertSql(String tableName, String[] fields) {
        StringJoiner columns = new StringJoiner(",");
        StringJoiner placeholders = new StringJoiner(",");
        StringJoiner updates = new StringJoiner(",");
        for (String field : fields) {
            columns.add(field);
            placeholders.add("?");
            if (!DEFAULT_PK_FIELD.equals(field)) {
                updates.add(field + "=EXCLUDED." + field);
            }
        }
        return String.format(
                "INSERT INTO %s (%s) VALUES (%s) ON CONFLICT (%s) DO UPDATE SET %s",
                tableName, columns, placeholders, DEFAULT_PK_FIELD, updates);
    }

    /**
     * Builds a standard-SQL MERGE upsert for non-PostgreSQL databases.
     * BUG FIX: the previous statement aliased the VALUES row as a single column
     * tmp(col) and used bare "?" placeholders in the UPDATE clause, so the
     * parameter count never matched the fields.length values bound by
     * executeUpsert. This version names every tmp column and references
     * tmp.<field> in both branches, so exactly fields.length parameters are bound.
     */
    private String buildGenericUpsertSql(String tableName, String[] fields) {
        String columns = String.join(",", fields);
        String placeholders = String.join(",", Collections.nCopies(fields.length, "?"));
        String updates = Arrays.stream(fields)
                .filter(f -> !f.equals(DEFAULT_PK_FIELD))
                .map(f -> f + "=tmp." + f)
                .collect(Collectors.joining(","));
        String tmpValues = Arrays.stream(fields)
                .map(f -> "tmp." + f)
                .collect(Collectors.joining(","));

        return String.format(
                "MERGE INTO %s USING (VALUES(%s)) AS tmp(%s) ON %s.%s=tmp.%s " +
                        "WHEN MATCHED THEN UPDATE SET %s WHEN NOT MATCHED THEN INSERT (%s) VALUES (%s)",
                tableName, placeholders, columns, tableName, DEFAULT_PK_FIELD, DEFAULT_PK_FIELD,
                updates, columns, tmpValues);
    }

    /**
     * Binds one parameter, using an explicit typed setNull for null values
     * (some drivers reject a bare setObject(null)).
     */
    private void setStatementParameter(PreparedStatement stmt, int index, Object value, int sqlType) throws SQLException {
        if (value != null) {
            stmt.setObject(index, value, sqlType);
        } else {
            stmt.setNull(index, sqlType);
        }
    }

    /**
     * True when the connection's database reports itself as PostgreSQL.
     * Constant-first equals avoids an NPE if the driver ever returns a null
     * product name.
     */
    private boolean isPostgreSQL(Connection conn) throws SQLException {
        return "PostgreSQL".equals(conn.getMetaData().getDatabaseProductName());
    }

    /**
     * Hands out a pooled connection with auto-commit disabled; commits are
     * issued explicitly per processed file.
     */
    private Connection getConnection() throws SQLException {
        Connection connection = dataSource.getConnection();
        connection.setAutoCommit(false);
        return connection;
    }
}
