package com.ververica.cdc.guass.sink.jdbc.internal.executor;

import com.ververica.cdc.guass.sink.jdbc.core.database.dialect.JdbcDialect;
import com.ververica.cdc.guass.sink.jdbc.core.database.dialect.JdbcDialectConverter;
import com.ververica.cdc.guass.sink.jdbc.statement.FieldNamedPreparedStatement;
import com.ververica.cdc.guass.sink.jdbc.util.OperationType;
import com.ververica.cdc.guass.source.kafka.data.NullableValue;
import org.apache.flink.table.data.GenericRowData;
import org.apache.flink.table.data.RowData;
import org.apache.flink.table.types.logical.LogicalType;
import org.apache.flink.table.types.logical.RowType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.BatchUpdateException;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.*;

import static org.apache.flink.util.Preconditions.checkNotNull;

/**
 * Created by lzhcode
 * At 2014/12/06
 * <p>
 * 思路：
 * 1. 在 addToBatch 时，不做立即执行，而是将记录及其存在的字段信息缓存起来。
 * 2. 缓存的数据结构：
 * Map<List<String>, List<GenericRowData>>
 * key为字段列表（存在字段的有序列表），value为该类字段模式下的所有待插入行。
 * 3. 在 executeBatch 时，对每组（同样的字段列表）构建对应的 INSERT SQL 和 PreparedStatement（如果方言支持 Upsert 则优先使用 Upsert 语句），
 * 使用 rowConverter 将所有行的值设置到 PreparedStatement 中，最后批量执行。
 * 4. 批量执行完成后清空缓存。
 */
public class TableSimpleDynamicFieldsStatementExecutor implements JdbcBatchStatementExecutor<RowData> {

    private final JdbcDialect dialect;
    private final JdbcDialectConverter rowConverter;
    private final String[] allFieldNames;
    private final String[] pkNames;
    private final LogicalType[] fieldTypes;
    private final String tableName;
    private final OperationType opType;

    private Connection connection;
    private transient FieldNamedPreparedStatement allFiledStatement;


    // 缓存结构：key为存在字段名的列表（用于区分不同的语句），value为对应待写入的行列表
    private final Map<List<String>, List<GenericRowData>> bufferedRows = new HashMap<>();

    private static final Logger LOG = LoggerFactory.getLogger(TableSimpleDynamicFieldsStatementExecutor.class);


    // 新增当前批次ID跟踪
    private String currentBatchId;

    public TableSimpleDynamicFieldsStatementExecutor(
            JdbcDialect dialect,
            JdbcDialectConverter rowConverter,
            String[] fieldNames,
            String[] pkNames,
            LogicalType[] fieldTypes,
            String tableName,
            OperationType opType) {
        this.dialect = dialect;
        if (opType == OperationType.Delete) {
            this.allFieldNames = pkNames;
        } else {
            this.allFieldNames = fieldNames;
        }
        this.pkNames = pkNames;
        this.fieldTypes = fieldTypes;
        this.tableName = tableName;
        this.opType = opType;
        this.rowConverter = checkNotNull(rowConverter);
    }

    @Override
    public void prepareStatements(Connection connection) throws SQLException {
        this.connection = connection;
        String statement = getStatementForOperation(allFieldNames);
        allFiledStatement = FieldNamedPreparedStatement.prepareStatement(connection, statement, allFieldNames);
    }

    @Override
    public void addToBatch(RowData record) throws SQLException {

        // 确保批次ID初始化
        if (currentBatchId == null) {
            currentBatchId = UUID.randomUUID().toString();
            LOG.info("Started new batch {}", currentBatchId);
        }

        if (record instanceof GenericRowData) {
            // Extract existing fields and their values
            List<String> existFieldNames = new ArrayList<>();
            List<Object> existValues = new ArrayList<>();


            for (int i = 0; i < allFieldNames.length; i++) {
                boolean fieldExists;
                Object field = ((GenericRowData) record).getField(i);
                Object realValue = null;
                if (field instanceof NullableValue) {
                    NullableValue nv = (NullableValue) field;
                    fieldExists = nv.isExists();
                    realValue = nv.getValue();
                } else {
                    //this is delete evecnt
                    fieldExists = !record.isNullAt(i);
                    realValue = field;
                }

                if (fieldExists) {
                    existFieldNames.add(allFieldNames[i]);
                    existValues.add(realValue);
                }

            }

            if (opType == OperationType.Delete) {
                // For delete operations, ensure all primary keys are present
                boolean hasAllPrimaryKeys = true;
                for (String pk : pkNames) {
                    if (!existFieldNames.contains(pk)) {
                        hasAllPrimaryKeys = false;
                        break;
                    }
                }

                if (!hasAllPrimaryKeys) {
                    // Missing primary key fields; cannot perform delete
                    // Optionally, log a warning or throw an exception
                    System.err.println("Warning: Missing primary key fields for delete operation. Skipping row.");
                    return;
                }
            } else {
                // For other operations, skip if no fields are present
                if (existFieldNames.isEmpty()) {
                    return;
                }
            }

            // Create a GenericRowData for the existing fields
            GenericRowData filteredRow = new GenericRowData(record.getRowKind(), existValues.size());
            for (int j = 0; j < existValues.size(); j++) {
                filteredRow.setField(j, existValues.get(j));
            }


            // Add to buffered rows
            List<String> keyList = Collections.unmodifiableList(existFieldNames);
            bufferedRows.computeIfAbsent(keyList, k -> new ArrayList<>()).add(filteredRow);
        } else {
            rowConverter.toExternal(record, allFiledStatement);
            allFiledStatement.addBatch();
        }

    }

    public void executeBatch() throws SQLException {
        if (!bufferedRows.isEmpty()) {
            for (Map.Entry<List<String>, List<GenericRowData>> entry : bufferedRows.entrySet()) {
                List<String> fieldList = entry.getKey();
                List<GenericRowData> rows = entry.getValue();
                String[] fieldArray = fieldList.toArray(new String[0]);
                String statement = getStatementForOperation(fieldArray);

                executeBatchWithErrorHandling(statement, fieldList, rows);
            }
            bufferedRows.clear();
        } else {
            executeBatchWithErrorHandling(allFiledStatement);
        }
        currentBatchId = null; // 重置批次ID
    }

    private void executeBatchWithErrorHandling(String statement, List<String> fieldList, List<GenericRowData> rows) throws SQLException {
        // 打印调试信息：开始执行批处理
        LOG.info("batchId {} Executing batch with statement: {}",currentBatchId, statement);
        LOG.info("batchId {} Fields: {}", currentBatchId,fieldList);
        LOG.info("batchId {} Number of rows:{} ",currentBatchId, rows.size());


        FieldNamedPreparedStatement partFieldStatement = null;
        try {
            // 创建转换器
            JdbcDialectConverter partialConverter = createPartialConverter(fieldList);

            partFieldStatement = FieldNamedPreparedStatement.prepareStatement(connection, statement, fieldList.toArray(new String[0]));
            for (GenericRowData row : rows) {
                try {
                    // 转换数据并添加到批处理
                    partialConverter.toExternal(row, partFieldStatement);
                    partFieldStatement.addBatch();
                } catch (Exception e) {
                    // 如果转换出错，打印调试信息并继续处理其他行
                    LOG.error("Error converting row data: {} ", row);
                    e.printStackTrace();
                }
            }
            try {
                // 执行批处理
                int[] updateCounts = partFieldStatement.executeBatch();
                LOG.info("Batch {} executed, update counts: {}",currentBatchId, Arrays.toString(updateCounts));
            } catch (BatchUpdateException e) {
                // 打印调试信息：批处理执行异常
                LOG.error("Batch execution failed:{} ", e.getMessage());
                LOG.error("Batch {} update counts: {},", Arrays.toString(e.getUpdateCounts()),currentBatchId);
                handleBatchUpdateException(e, rows);
            }
        } catch (SQLException e) {
            LOG.error("Execute batch error", e);
            throw e;
        }
    }


    private void executeBatchWithErrorHandling(FieldNamedPreparedStatement statement) throws SQLException {
        try {
            statement.executeBatch();
        } catch (BatchUpdateException e) {
            handleBatchUpdateException(e, Collections.emptyList());
            throw e;
        }
    }

    private void handleBatchUpdateException(BatchUpdateException e, List<GenericRowData> rows) {
        int[] updateCounts = e.getUpdateCounts();
        for (int i = 0; i < updateCounts.length; i++) {
            if (updateCounts[i] == Statement.EXECUTE_FAILED) {
                if (i < rows.size()) {
                    handleFailedStatement(rows.get(i));
                } else {
                    handleFailedStatement(null);
                }
            }
        }
    }

    private void handleFailedStatement(RowData row) {
        if (row != null) {
            LOG.error("batch {} Failed to execute statement for row: {}", currentBatchId,row);
        } else {
            LOG.error("batch {} Failed to execute statement", currentBatchId);
        }
    }

    @Override
    public void closeStatements() throws SQLException {
        if (allFiledStatement != null) {
            allFiledStatement.close();
            allFiledStatement = null;
        }
    }

    private String getStatementForOperation(String[] fieldArray) throws SQLException {
        switch (opType) {
            case Insert:
                return dialect.getInsertIntoStatement(tableName, fieldArray);
            case Update:
                return dialect.getUpdateStatement(tableName, fieldArray, pkNames);
            case Delete:
                return dialect.getDeleteStatement(tableName, pkNames);
            case Upsert:
                Optional<String> upsertStatement = dialect.getUpsertStatement(tableName, fieldArray, pkNames);
                return upsertStatement.orElseThrow(() -> new UnsupportedOperationException("Upsert not supported by this dialect"));
            default:
                throw new UnsupportedOperationException("Unknown operation type: " + opType);
        }
    }

    /**
     * 创建一个仅处理指定字段集合的converter。
     */
    private JdbcDialectConverter createPartialConverter(List<String> existFieldNames) {
        LogicalType[] existLogicalTypes = new LogicalType[existFieldNames.size()];
        for (int i = 0; i < existFieldNames.size(); i++) {
            int idx = Arrays.asList(allFieldNames).indexOf(existFieldNames.get(i));
            if (idx == -1) {
                throw new IllegalArgumentException("Field '" + existFieldNames.get(i) + "' not found in allFieldNames");
            }
            existLogicalTypes[i] = fieldTypes[idx];
        }
        RowType rowType = RowType.of(existLogicalTypes, existFieldNames.toArray(new String[0]));
        return dialect.getRowConverter(rowType);
    }

}
