package com.intct.sql;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.EnvironmentSettings;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @author gufg
 * @since 2025-11-13 08:33
 */
public class MysqlToKafkaJSON {
    /**
     * Consumes Debezium-style JSON change records ({@code before}/{@code after}
     * maps plus an {@code op} flag) from a Kafka topic and prints the insert
     * events ({@code op = 'c'}) to stdout.
     *
     * <p>NOTE(review): despite the class name, no MySQL CDC table is registered in
     * this file — the commented-out {@code mysql_t1_cdc} query suggests the MySQL
     * source was removed or lives elsewhere. Confirm the intended pipeline.
     *
     * @param args unused
     */
    public static void main(String[] args) {
        // 1. Initialize the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // Exactly-once checkpoints every 5 seconds.
        // NOTE(review): org.apache.flink.streaming.api.CheckpointingMode is the
        // deprecated location in recent Flink releases (moved to
        // org.apache.flink.core.execution.CheckpointingMode) — confirm the Flink
        // version before migrating.
        env.enableCheckpointing(5000L, org.apache.flink.streaming.api.CheckpointingMode.EXACTLY_ONCE);

        // 2. Initialize the table environment (unified API, Flink 1.15+).
        EnvironmentSettings settings = EnvironmentSettings.newInstance()
                .inStreamingMode()
                .build();
        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env, settings);

        // 3. Register the Kafka table. Renamed from "createKafkaSink": the table
        //    is read below, and 'scan.startup.mode' is a source-only option — this
        //    DDL defines a Kafka SOURCE, not a sink.
        String createKafkaSource = "CREATE TABLE kafka_c8601 (" +
                "  before MAP<STRING, STRING>," +
                "  after MAP<STRING, STRING>," +
                "  op STRING, " +
                // Processing-time attribute, computed — not read from the message.
                "  ts AS PROCTIME()" +
                // NOTE(review): if this PRIMARY KEY line is ever re-enabled, a
                // comma must be added after PROCTIME() or the DDL will not parse.
//                "  PRIMARY KEY (op) NOT ENFORCED" +
                ") WITH (" +
                "  'connector' = 'kafka'," +
                "  'properties.bootstrap.servers' = 'cdh-node:9092'," +
                "  'topic' = 'c8603'," +
                "  'properties.group.id' = 'kafka_group_011111'," +
                "  'scan.startup.mode'='earliest-offset'," +
                "  'value.format' = 'json'" +
                ")";
        tableEnv.executeSql(createKafkaSource);

        // 4. Print insert ('c') events; print() blocks and keeps the job alive.
        tableEnv.executeSql("SELECT op, after['id'], after['name'], ts FROM kafka_c8601 where op = 'c'").print();

        //tableEnv.executeSql("SELECT * FROM mysql_t1_cdc").print();
    }
}