package com.ververica.cdc.guass.sink.jdbc.derby.table.base;

import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.ververica.cdc.guass.Constants;
import com.ververica.cdc.guass.sink.jdbc.derby.DerbyTestBase;
import com.ververica.cdc.guass.sink.jdbc.testutils.DatabaseMetadata;
import com.ververica.cdc.guass.sink.jdbc.testutils.TableManaged;
import com.ververica.cdc.guass.sink.jdbc.testutils.tables.TableRow;
import com.ververica.cdc.guass.source.kafka.data.ChangeEvent;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.sql.*;
import java.util.*;

import static com.ververica.cdc.guass.sink.jdbc.testutils.tables.TableBuilder.*;
import static com.ververica.cdc.guass.sink.jdbc.testutils.tables.TableBuilder.field;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.awaitility.Awaitility.await;

public class JdbcHybridSourceToSinkITCase implements DerbyTestBase {

    // Loggers are stateless and shared: static final per class, not per instance.
    private static final Logger LOG =
            LoggerFactory.getLogger(JdbcHybridSourceToSinkITCase.class);

    // ObjectMapper is thread-safe and expensive to create; share a single instance
    // instead of constructing one per message.
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    private StreamExecutionEnvironment env;
    private StreamTableEnvironment tEnv;

    // Physical schema of the source (Kafka CDC) table.
    protected final TableRow kafkaSourceTable = createKafkaSourceTable();

    // Physical schema of the sink (JDBC) table.
    protected final TableRow jdbcSinkTable = createJdbcSinkTable();

    /**
     * Defines the physical schema of the JDBC sink table.
     *
     * @return the sink table definition (int PK {@code id}, {@code name},
     *     the {@link Constants#PX_GAUSS_MARKS} marker column, and {@code age})
     */
    protected TableRow createJdbcSinkTable() {
        return tableRow(
                "jdbc_sink_table", // sink table name
                pkField("id", DataTypes.INT().notNull()),
                field("name", DataTypes.VARCHAR(1000)),
                field(Constants.PX_GAUSS_MARKS, DataTypes.VARCHAR(1000)),
                field("age", DataTypes.INT())
        );
    }

    /**
     * Defines the physical schema of the Kafka source table. The column layout
     * deliberately mirrors {@link #createJdbcSinkTable()} so rows can be copied
     * 1:1 from source to sink.
     *
     * @return the source table definition
     */
    protected TableRow createKafkaSourceTable() {
        return tableRow(
                "kafka_source_table",
                pkField("id", DataTypes.INT().notNull()),
                field("name", DataTypes.VARCHAR(1000)),
                field(Constants.PX_GAUSS_MARKS, DataTypes.VARCHAR(1000)),
                field("age", DataTypes.INT())
        );
    }

    /** Tables whose lifecycle (create/drop) is managed by the test harness. */
    @Override
    public List<TableManaged> getManagedTables() {
        return Arrays.asList(
                kafkaSourceTable,
                jdbcSinkTable
        );
    }

    /** Creates a fresh streaming environment before each test. */
    @BeforeEach
    void setup() {
        env = StreamExecutionEnvironment.getExecutionEnvironment();
        tEnv = StreamTableEnvironment.create(
                env,
                EnvironmentSettings.newInstance().inStreamingMode().build());
        env.setParallelism(4);
    }

    /**
     * End-to-end hybrid sync test: seeds snapshot rows directly into the source
     * database, publishes incremental INSERT/UPDATE/DELETE change events to
     * Kafka, runs a Flink {@code INSERT INTO sink SELECT ... FROM source} job,
     * and asserts the sink converges to the expected merged state.
     *
     * <p>NOTE(review): {@code TOPIC}, {@code BOOTSTRAP_SERVERS}, {@code GROUP_ID},
     * {@code SCAN_STARTUP_MODE}, {@code getMetadata()}, {@code getConnection()}
     * and {@code getProperties()} are presumed inherited from
     * {@link DerbyTestBase} — not visible in this file.
     */
    @Test
    void testHybridSourceToSink() throws Exception {
        DatabaseMetadata metadata = getMetadata();

        // --- Create the Flink source table (hybrid snapshot + CDC reads). ---
        // Use the shared field directly instead of shadowing it with an
        // identically-built local copy.
        String sourceTableName = kafkaSourceTable.getTableName();

        String createSourceTableDdl = kafkaSourceTable.getCreateQueryForFlink(
                sourceTableName,
                Arrays.asList(
                        "'connector' = '" + Constants.GAUSS_CDC_CONNECTOR_NAME + "'", // adjust to your actual connector name
                        "'topic' = '" + TOPIC + "'",
                        "'properties.bootstrap.servers' =  '" + BOOTSTRAP_SERVERS + "'",
                        "'properties.group.id' =  '" + GROUP_ID + "'",
                        // BUG FIX: 'scan.startup.mode' was listed twice in the
                        // original option list; it is now set exactly once.
                        "'scan.startup.mode' =  '" + SCAN_STARTUP_MODE + "'",
                        "'url' = '" + metadata.getJdbcUrl() + "'",
                        "'username' = '" + metadata.getUsername() + "'",
                        "'password' = '" + metadata.getPassword() + "'",
                        "'table-name' = '" + sourceTableName + "'",
                        "'enable-parallel-read' = 'true'"
                )
        );
        tEnv.executeSql(createSourceTableDdl);

        // --- Create the Flink sink table (JDBC, flushed on every row so the
        // verification loop below sees writes immediately). ---
        String sinkTableName = jdbcSinkTable.getTableName();
        String createSinkTableDdl = jdbcSinkTable.getCreateQueryForFlink(
                metadata,
                sinkTableName,
                Arrays.asList(
                        "'connector' = '" + Constants.GAUSS_JDBC_NAME + "'",
                        String.format("'url' = '%s'", metadata.getJdbcUrl()),
                        String.format("'username' = '%s'", metadata.getUsername()),
                        String.format("'password' = '%s'", metadata.getPassword()),
                        String.format("'table-name' = '%s'", sinkTableName),
                        "'sink.buffer-flush.max-rows' = '1'",
                        "'sink.buffer-flush.interval' = '0'",
                        "'sink.max-retries' = '0'"
                )
        );
        tEnv.executeSql(createSinkTableDdl);

        // --- Seed the snapshot (full) data directly into the source database. ---
        try (Connection connection = getConnection();
             Statement statement = connection.createStatement()) {
            statement.execute("INSERT INTO " + sourceTableName + " ( id, name, age) VALUES" +
                    " (1, 'John',  25), " +
                    " (2, 'Alice',  30), " +
                    " (3, 'John1',  26), " +
                    " (4, 'John2',  27), " +
                    " (5, 'John3',  28), " +
                    " (6, 'John4',  29), " +
                    " (7, 'John5',  30), " +
                    " (8, 'John6',  31), " +
                    " (9, 'John7',  32), " +
                    "(10, 'Alice8',  33)," +
                    " (11, 'John11',  34), " +
                    " (20, 'John20',  35) ");
        } catch (SQLException e) {
            throw new RuntimeException("Failed to insert initial data into source table", e);
        }

        // --- Publish incremental change events to Kafka. ---
        sendToKafka(sourceTableName, TOPIC, 3, "Bob", 35, "I");  // INSERT
        sendToKafka(sourceTableName, TOPIC, 1, null, 26, "U");   // UPDATE
        sendToKafka(sourceTableName, TOPIC, 2, null, null, "D"); // DELETE

        // --- Run the hybrid (snapshot + incremental) sync job. ---
        tEnv.executeSql("INSERT INTO " + sinkTableName + "  SELECT id, name," + Constants.PX_GAUSS_MARKS + ", age FROM " + sourceTableName);

        // --- Verify the sink eventually converges to the expected rows. ---
        await().atMost(120, SECONDS).untilAsserted(() -> {

            List<String> actualResults = new ArrayList<>();
            // BUG FIX: the JDBC Connection/Statement/ResultSet were never closed,
            // leaking a connection on every awaitility retry. try-with-resources
            // guarantees cleanup even when the assertion below throws.
            try (Connection connection = DriverManager.getConnection(metadata.getJdbcUrl());
                 Statement statement = connection.createStatement();
                 ResultSet resultSet = statement.executeQuery("SELECT * FROM " + sinkTableName)) {
                while (resultSet.next()) {
                    actualResults.add(String.format(
                            "%d,%s,%d",
                            resultSet.getInt("id"),
                            resultSet.getString("name"),
                            resultSet.getInt("age")
                    ));
                }
            }

            // NOTE(review): 3 Bob row inserted, id=1 updated to age 26, id=2
            // deleted; snapshot-only rows are presumed filtered by the marker
            // column semantics — confirm against connector behavior.
            List<String> expectedResults = Arrays.asList(
                    "1,John,26",
                    "3,Bob,35"
            );
            // Row order from SELECT * is unspecified; sort before comparing.
            Collections.sort(actualResults);
            // BUG FIX: the original only compared list sizes, so wrong row
            // contents could pass. Compare the full row payloads instead.
            org.junit.jupiter.api.Assertions.assertEquals(expectedResults, actualResults);

        });


    }

    /**
     * Builds a {@link ChangeEvent} for one row and publishes it as a JSON string
     * to the given Kafka topic.
     *
     * @param tableName logical source table the event belongs to
     * @param topic     destination Kafka topic
     * @param id        primary key of the affected row (always present)
     * @param name      new name value, or {@code null} to omit the field
     * @param age       new age value, or {@code null} to omit the field
     * @param opType    operation code: "I" insert, "U" update, "D" delete
     * @throws RuntimeException if the event cannot be serialized to JSON
     */
    private void sendToKafka(String tableName, String topic, Integer id, String name, Integer age, String opType) {
        // Build the change event payload.
        ChangeEvent changeEvent = new ChangeEvent();
        changeEvent.setTable(tableName);
        changeEvent.setOpType(opType);

        // Only non-null fields are included, mimicking partial-column updates.
        HashMap<String, Object> data = new HashMap<>();
        data.put("id", id);
        if (name != null) {
            data.put("name", name);
        }
        if (age != null) {
            data.put("age", age);
        }
        changeEvent.setData(data);

        String jsonString;
        try {
            jsonString = OBJECT_MAPPER.writeValueAsString(changeEvent);
        } catch (JsonProcessingException e) {
            throw new RuntimeException("Failed to serialize change event for table " + tableName, e);
        }

        // BUG FIX: the producer was previously never closed (socket/thread leak)
        // and the async send was never flushed, so the record could be dropped on
        // JVM exit. close() flushes pending records and releases resources.
        try (KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(getProperties())) {
            kafkaProducer.send(new ProducerRecord<>(topic, jsonString));
        }

        // Parameterized logging avoids eager string concatenation.
        LOG.info("Sent message to Kafka topic {}: {}", topic, jsonString);
    }
}
