package com.ververica.cdc.guass.sink.jdbc.derby.table.base;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.ververica.cdc.guass.Constants;
import com.ververica.cdc.guass.sink.jdbc.derby.DerbyTestBase;
import com.ververica.cdc.guass.sink.jdbc.testutils.DatabaseMetadata;
import com.ververica.cdc.guass.sink.jdbc.testutils.TableManaged;
import com.ververica.cdc.guass.sink.jdbc.testutils.tables.TableRow;
import com.ververica.cdc.guass.source.kafka.data.ChangeEvent;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.*;
import java.sql.Date;
import java.util.*;

import static com.ververica.cdc.guass.sink.jdbc.testutils.tables.TableBuilder.*;
import static com.ververica.cdc.guass.sink.jdbc.testutils.tables.TableBuilder.field;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.awaitility.Awaitility.await;
import static org.junit.Assert.assertNotNull;
import static org.junit.jupiter.api.Assertions.*;

/**
 * Integration test for hybrid (snapshot + incremental) synchronization from a Kafka-backed
 * CDC source table to a JDBC sink table whose primary key is composite (name + birthdate).
 *
 * <p>Flow under test: five rows are inserted directly into the source table (snapshot phase),
 * then one INSERT, one UPDATE and one DELETE change event are published to Kafka
 * (incremental phase); the sink is expected to converge to five rows with Alice removed,
 * John updated and Eva added.
 */
public class JdbcHybridSourceToSinkWithCompositePKITCase implements DerbyTestBase {

    private static final Logger log =
            LoggerFactory.getLogger(JdbcHybridSourceToSinkWithCompositePKITCase.class);

    // ObjectMapper is thread-safe and expensive to construct; share one instance
    // instead of creating a new mapper per Kafka message.
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    private StreamExecutionEnvironment env;
    private StreamTableEnvironment tEnv;

    // Physical structure of the source (Kafka CDC) table.
    protected final TableRow kafkaSourceTable = createKafkaSourceTable();

    // Physical structure of the sink (JDBC) table.
    protected final TableRow jdbcSinkTable = createJdbcSinkTable();

    // Expected sink contents after full + incremental sync: Alice deleted, John updated,
    // Eva inserted. Kept in name order to match the ORDER BY of the verification query.
    private final List<Map<String, Object>> expectedResults = Arrays.asList(
            createRecord("Bob", "1992-03-15", "OtherData3"),
            createRecord("Charlie", "1987-07-25", "OtherData4"),
            createRecord("Dave", "1995-09-30", "OtherData5"),
            createRecord("Eva", "1991-05-17", "OtherData6"),
            createRecord("John", "1990-01-01", "UpdatedData")
    );


    /**
     * Defines the physical structure of the sink (JDBC) table.
     *
     * @return the sink table definition with a composite primary key (name, birthdate)
     */
    protected TableRow createJdbcSinkTable() {
        return tableRow(
                "jdbc_sink_table2", // sink table name
                pkField("name", DataTypes.VARCHAR(1000).notNull()), // composite PK part: name as String
                pkField("birthdate", DataTypes.DATE().notNull()), // composite PK part: birthdate as Date
                field(Constants.PX_GAUSS_MARKS, DataTypes.VARCHAR(1000)),
                field("other_field", DataTypes.VARCHAR(1000)) // optional additional field for testing
        );
    }

    /**
     * Defines the physical structure of the source (Kafka) table.
     *
     * @return the source table definition with a composite primary key (name, birthdate)
     */
    protected TableRow createKafkaSourceTable() {
        return tableRow(
                "kafka_source_table2",
                pkField("name", DataTypes.VARCHAR(1000).notNull()),  // String field as part of composite key
                pkField("birthdate", DataTypes.DATE().notNull()),  // Date field as part of composite key
                field(Constants.PX_GAUSS_MARKS, DataTypes.VARCHAR(1000)),
                field("other_field", DataTypes.VARCHAR(1000))
        );
    }

    /** Tables whose lifecycle (create/drop) is managed by the test harness. */
    @Override
    public List<TableManaged> getManagedTables() {
        return Arrays.asList(
                kafkaSourceTable,
                jdbcSinkTable
        );
    }

    /** Creates a fresh streaming environment before each test. */
    @BeforeEach
    void setup() {
        env = StreamExecutionEnvironment.getExecutionEnvironment();
        tEnv = StreamTableEnvironment.create(
                env,
                EnvironmentSettings.newInstance().inStreamingMode().build());
        env.setParallelism(4);
    }

    @Test
    void testHybridSourceToSinkWithCompositePK() throws Exception {
        DatabaseMetadata metadata = getMetadata();

        // Create the source table. NOTE: the original DDL listed 'scan.startup.mode'
        // twice; the duplicate option has been removed.
        String sourceTableName = kafkaSourceTable.getTableName();
        String createSourceTableDdl = kafkaSourceTable.getCreateQueryForFlink(
                kafkaSourceTable.getTableName(),
                Arrays.asList(
                        "'connector' = '" + Constants.GAUSS_CDC_CONNECTOR_NAME + "'",
                        "'topic' = '" + TOPIC + "'",
                        "'properties.bootstrap.servers' = '" + BOOTSTRAP_SERVERS + "'",
                        "'properties.group.id' = '" + GROUP_ID + "'",
                        "'scan.startup.mode' = '" + SCAN_STARTUP_MODE + "'",
                        "'url' = '" + metadata.getJdbcUrl() + "'",
                        "'username' = '" + metadata.getUsername() + "'",
                        "'password' = '" + metadata.getPassword() + "'",
                        "'table-name' = '" + sourceTableName + "'",
                        "'enable-parallel-read' = 'true'"
                )
        );
        tEnv.executeSql(createSourceTableDdl);

        // Create the sink table.
        String sinkTableName = jdbcSinkTable.getTableName();
        String createSinkTableDdl = jdbcSinkTable.getCreateQueryForFlink(
                metadata,
                sinkTableName,
                Arrays.asList(
                        "'connector' = '" + Constants.GAUSS_JDBC_NAME + "'",
                        String.format("'url' = '%s'", metadata.getJdbcUrl()),
                        String.format("'username' = '%s'", metadata.getUsername()),
                        String.format("'password' = '%s'", metadata.getPassword()),
                        String.format("'table-name' = '%s'", sinkTableName),
                        "'sink.buffer-flush.max-rows' = '1'",
                        "'sink.buffer-flush.interval' = '0'",
                        "'sink.max-retries' = '0'"
                )
        );
        tEnv.executeSql(createSinkTableDdl);

        // Insert the initial snapshot data directly into the source table.
        try (Connection connection = getConnection();
             Statement statement = connection.createStatement()) {

            statement.execute("INSERT INTO " + sourceTableName + " (name, birthdate, other_field) VALUES" +
                    " ('John', '1990-01-01', 'OtherData1'), " +
                    " ('Alice', '1985-02-12', 'OtherData2'), " +
                    " ('Bob', '1992-03-15', 'OtherData3'), " +
                    " ('Charlie', '1987-07-25', 'OtherData4'), " +
                    " ('Dave', '1995-09-30', 'OtherData5')");
        } catch (SQLException e) {
            throw new RuntimeException("Failed to insert initial data into source table", e);
        }

        // Publish incremental change events to Kafka:
        // INSERT of a new row ...
        sendToKafka(sourceTableName, TOPIC, "Eva", "1991-05-17", "OtherData6", "I");

        // ... UPDATE of an existing row ...
        sendToKafka(sourceTableName, TOPIC, "John", "1990-01-01", "UpdatedData", "U");

        // ... and DELETE of an existing row.
        sendToKafka(sourceTableName, TOPIC, "Alice", "1985-02-12", null, "D");

        // Start the hybrid (snapshot + incremental) sync job.
        tEnv.executeSql("INSERT INTO " + sinkTableName + " SELECT name, birthdate, " + Constants.PX_GAUSS_MARKS + ",other_field FROM " + sourceTableName);

        // Poll the sink until it has converged to the expected state.
        await().atMost(120, SECONDS).untilAsserted(() -> {
            try (Connection connection = DriverManager.getConnection(metadata.getJdbcUrl());
                 Statement statement = connection.createStatement()) {

                // First verify the row count; assertEquals aborts this attempt on mismatch,
                // so no extra guard around the field-level checks is needed.
                ResultSet countResult = statement.executeQuery("SELECT COUNT(*) FROM " + sinkTableName);
                assertTrue(countResult.next(), "Count query should return a result");

                int actualCount = countResult.getInt(1);
                int expectedCount = expectedResults.size();
                assertEquals(expectedCount, actualCount, "Record count mismatch");

                // Row count matches — now verify each field of each row.
                ResultSet resultSet = statement.executeQuery("SELECT * FROM " + sinkTableName + " ORDER BY name");

                for (Map<String, Object> expected : expectedResults) {
                    assertTrue(resultSet.next(), "Expected more rows in the result set");

                    assertEquals(expected.get("name"), resultSet.getString("name"),
                            "Name mismatch for record: " + expected.get("name"));
                    assertEquals(expected.get("birthdate"), resultSet.getDate("birthdate"),
                            "Birthdate mismatch for record: " + expected.get("name"));
                    assertEquals(expected.get("other_field"), resultSet.getString("other_field"),
                            "Other field mismatch for record: " + expected.get("name"));

                    // Use the Jupiter assertion explicitly: the statically imported
                    // org.junit.Assert.assertNotNull(String message, Object object) treats
                    // the FIRST argument as the message, so the original call checked that
                    // the literal message string was non-null and could never fail.
                    org.junit.jupiter.api.Assertions.assertNotNull(
                            resultSet.getString(Constants.PX_GAUSS_MARKS),
                            "PX_GAUSS_MARKS should not be null for record: " + expected.get("name"));
                }

                assertFalse(resultSet.next(), "Result set contains more rows than expected");
            }
        });
    }


    /**
     * Builds one expected sink row.
     *
     * @param name       value of the "name" PK column
     * @param birthdate  ISO date string ("yyyy-MM-dd") for the "birthdate" PK column
     * @param otherField value of the non-key "other_field" column
     * @return a map keyed by column name
     */
    private static Map<String, Object> createRecord(String name, String birthdate, String otherField) {
        // Plain HashMap instead of double-brace initialization, which creates an
        // anonymous inner class per call and captures the enclosing scope.
        Map<String, Object> record = new HashMap<>();
        record.put("name", name);
        record.put("birthdate", Date.valueOf(birthdate));
        record.put("other_field", otherField);
        return record;
    }

    /**
     * Serializes a {@link ChangeEvent} to JSON and publishes it to the given Kafka topic,
     * keyed by table name.
     *
     * @param tableName  source table the event belongs to
     * @param topic      Kafka topic to publish to
     * @param name       "name" column value
     * @param birthdate  "birthdate" column value (ISO date string)
     * @param otherField "other_field" column value; omitted from the payload when null (e.g. deletes)
     * @param opType     operation type: "I" (insert), "U" (update) or "D" (delete)
     */
    private void sendToKafka(String tableName, String topic, String name, String birthdate, String otherField, String opType) {
        try {
            // Build the ChangeEvent payload.
            ChangeEvent changeEvent = new ChangeEvent();
            changeEvent.setTable(tableName);
            changeEvent.setOpType(opType);

            HashMap<String, Object> data = new HashMap<>();
            data.put("name", name);
            data.put("birthdate", birthdate);
            if (otherField != null) {
                data.put("other_field", otherField);
            }
            changeEvent.setData(data);

            String jsonString = OBJECT_MAPPER.writeValueAsString(changeEvent);
            // try-with-resources closes the producer (and flushes pending records);
            // the original leaked the producer and had the actual send commented out
            // while still logging that the message was sent.
            try (KafkaProducer<String, String> producer = new KafkaProducer<>(getProperties())) {
                producer.send(new ProducerRecord<>(topic, tableName, jsonString));
            }
            log.info("Sent message to Kafka topic {}: {}", topic, jsonString);
        } catch (JsonProcessingException e) {
            throw new RuntimeException("Failed to serialize change event for table " + tableName, e);
        }
    }
}

