package com.ververica.cdc.guass.sink.jdbc.mysql.table.base;

import com.ververica.cdc.guass.Constants;
import com.ververica.cdc.guass.sink.jdbc.testutils.DatabaseTest;
import com.ververica.cdc.guass.sink.jdbc.testutils.TableManaged;
import com.ververica.cdc.guass.sink.jdbc.testutils.tables.TableRow;
import com.ververica.cdc.guass.source.kafka.GaussKafkaSourceTest;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.DataTypes;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.test.util.AbstractTestBase;
import org.apache.flink.types.Row;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Arrays;
import java.util.List;

import static com.ververica.cdc.guass.sink.jdbc.testutils.tables.TableBuilder.*;
import static java.util.concurrent.TimeUnit.SECONDS;
import static org.awaitility.Awaitility.await;

/**
 * Base integration test that pipes CDC records from a Kafka source table into a
 * JDBC (GaussDB) sink table through the Flink Table API.
 *
 * <p>Concrete subclasses supply the database connection via {@link DatabaseTest}.
 * The source and sink tables share an identical 20-column schema, defined once in
 * {@link #buildWaferCarrierHistoryTable(String)}.
 */
public abstract class JdbcSourceToSinkITCase extends AbstractTestBase implements DatabaseTest {

    /** Instance-scoped logger so log lines carry the concrete subclass name. */
    private final Logger log = LoggerFactory.getLogger(this.getClass());

    // Kafka connection settings shared with the Kafka source test fixture.
    private static final String TOPIC = GaussKafkaSourceTest.TOPIC;
    private static final String BOOTSTRAP_SERVERS = GaussKafkaSourceTest.BOOTSTRAP_SERVERS;
    private static final String GROUP_ID = GaussKafkaSourceTest.GROUP_ID;
    private static final String TABLE_NAME = GaussKafkaSourceTest.TABLE_NAME;
    private static final String SCAN_STARTUP_MODE = GaussKafkaSourceTest.SCAN_STARTUP_MODE;

    /**
     * Physical structure of the JDBC sink table.
     *
     * <p>NOTE(review): this initializer invokes the overridable
     * {@link #createJdbcSinkTable()} during construction; a subclass override must
     * not rely on subclass state, because subclass fields are not yet initialized
     * at that point.
     */
    protected final TableRow jdbcSinkTable = createJdbcSinkTable();

    /**
     * Builds the shared wafer-carrier-history schema under the given table name.
     * Source and sink use exactly the same column layout, so it is defined once here.
     *
     * @param tableName physical name to register the schema under
     * @return the 20-column {@link TableRow} definition
     */
    private static TableRow buildWaferCarrierHistoryTable(String tableName) {
        return tableRow(
                tableName,
                pkField("ID", DataTypes.BIGINT().notNull()),
                field("COMPONENT_ID", DataTypes.BIGINT()),
                field("COMPONENT_NAME", DataTypes.VARCHAR(8)),
                field("PRE_LOT_NAME", DataTypes.VARCHAR(40)),
                field("PRE_CARRIER_NAME", DataTypes.VARCHAR(40)),
                field("PRE_SLOT_ID", DataTypes.INT()),
                field("NEW_LOT_NAME", DataTypes.VARCHAR(40)),
                field("NEW_CARRIER_NAME", DataTypes.VARCHAR(40)),
                field("NEW_SLOT_ID", DataTypes.INT()),
                field("ACTIVITY", DataTypes.VARCHAR(40)),
                field("TXN_COMMENT", DataTypes.VARCHAR(2000)),
                field("CREATED_BY", DataTypes.VARCHAR(40)),
                field("CREATED_BY_NAME", DataTypes.VARCHAR(80)),
                field("CREATED_TIME", DataTypes.TIMESTAMP()),
                field("LAST_UPDATED_BY", DataTypes.VARCHAR(40)),
                field("LAST_UPDATED_BY_NAME", DataTypes.VARCHAR(80)),
                field("LAST_UPDATED_TIME", DataTypes.TIMESTAMP()),
                field("PX_GAUSS_MARKS", DataTypes.VARCHAR(50)),
                field("DATA_STATUS", DataTypes.DECIMAL(2, 0)),
                field("DATA_VERSION", DataTypes.DECIMAL(20, 0))
        );
    }

    /**
     * Defines the physical structure of the JDBC sink table.
     * Overridable so subclasses can target a different sink table.
     */
    protected TableRow createJdbcSinkTable() {
        return buildWaferCarrierHistoryTable("TH_WIP_WAFER_CARRIER_HISTORY");
    }

    /**
     * Defines the physical structure of the Kafka source table
     * (same columns as the sink; only the registered name differs).
     */
    protected TableRow createKafkaSourceTable() {
        return buildWaferCarrierHistoryTable("KAFKA_SOURCE_TABLE");
    }

    @Override
    public List<TableManaged> getManagedTables() {
        // Only the JDBC sink physically exists in the database; the Kafka source
        // table is a Flink-side definition and needs no DB lifecycle management.
        return Arrays.asList(jdbcSinkTable);
    }

    @AfterEach
    void afterEach() {
        // Per-test cleanup hook. Implement as needed to guarantee isolation
        // between test runs (currently intentionally empty).
    }

    /**
     * End-to-end flow: Kafka CDC source -> Flink SQL INSERT -> JDBC sink, then
     * polls the sink table until the expected number of rows has arrived.
     *
     * @throws Exception if the Flink job or DDL registration fails
     */
    @Test
    protected void testKafkaSourceToJdbcSink() throws Exception {
        // Step 1: set up the Flink execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Object reuse avoids defensive per-record copies; safe here because the
        // pipeline does not hold on to record references across operators.
        env.getConfig().enableObjectReuse();

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // Step 2: register the Kafka source table.
        TableRow kafkaSourceTable = createKafkaSourceTable();
        String createSourceTableDdl = kafkaSourceTable.getCreateQueryForFlink(
                kafkaSourceTable.getTableName(),
                Arrays.asList(
                        "'connector' = '" + Constants.GAUSS_KAFKA_CDC_CONNECTOR_NAME + "'", // adjust to your actual connector name
                        "'topic' = '" + TOPIC + "'",
                        "'properties.bootstrap.servers' =  '" + BOOTSTRAP_SERVERS + "'",
                        "'properties.group.id' =  '" + GROUP_ID + "'",
                        "'table-name' =  '" + TABLE_NAME + "'",
                        "'scan.startup.mode' =  '" + SCAN_STARTUP_MODE + "'"
                )
        );
        tEnv.executeSql(createSourceTableDdl);

        // Step 3: register the JDBC sink table.
        String sinkTableName = jdbcSinkTable.getTableName();

        String createSinkTableDdl = jdbcSinkTable.getCreateQueryForFlink(
                getMetadata(),
                sinkTableName,
                Arrays.asList(
                        "'connector' = '" + Constants.GAUSS_JDBC_NAME + "'",
                        String.format("'url' = '%s'", getMetadata().getJdbcUrl()),
                        String.format("'username' = '%s'", getMetadata().getUsername()),
                        String.format("'password' = '%s'", getMetadata().getPassword()),
                        String.format("'table-name' = '%s'", jdbcSinkTable.getTableName()),
                        "'sink.buffer-flush.max-rows' = '2'",
                        "'sink.buffer-flush.interval' = '0'",
                        "'sink.max-retries' = '0'"
                )
        );
        tEnv.executeSql(createSinkTableDdl);

        // Step 4: route everything from the source into the sink.
        // BUGFIX: the previous SELECT listed only 18 of the sink's 20 columns
        // (it omitted ID and PX_GAUSS_MARKS). Flink validates that an INSERT INTO
        // without an explicit column list matches the target arity, so the old
        // statement could never run. Source and sink schemas are identical, so
        // the full column list (in sink order) is used here.
        String insertStatement = String.format(
                "INSERT INTO %s SELECT ID, COMPONENT_ID, COMPONENT_NAME, PRE_LOT_NAME, PRE_CARRIER_NAME, PRE_SLOT_ID, NEW_LOT_NAME, NEW_CARRIER_NAME, NEW_SLOT_ID, ACTIVITY, TXN_COMMENT, CREATED_BY, CREATED_BY_NAME, CREATED_TIME, LAST_UPDATED_BY, LAST_UPDATED_BY_NAME, LAST_UPDATED_TIME, PX_GAUSS_MARKS, DATA_STATUS, DATA_VERSION FROM %s",
                sinkTableName,
                kafkaSourceTable.getTableName()
        );

        // Submits the streaming insert job asynchronously.
        tEnv.executeSql(insertStatement);

        // Step 5: wait for the streaming job to deliver the expected rows.
        int expectedRecordCount = 10; // adjust based on your test data

        await().atMost(120, SECONDS).untilAsserted(() -> {

            // Step 6: read back and log whatever has landed in the JDBC sink so far.
            List<Row> insertedRows = jdbcSinkTable.selectAllTable(getMetadata());

            insertedRows.forEach(row -> log.info("Row: {}", row));

            org.junit.jupiter.api.Assertions.assertTrue(
                    insertedRows.size() >= expectedRecordCount,
                    () -> "expected at least " + expectedRecordCount
                            + " rows in " + sinkTableName
                            + " but found " + insertedRows.size());
        });

    }
}
