package com.intct.flink.study;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @author gufg
 * @since 2025-10-10 14:06
 *
 * Flink SQL CDC pipeline:
 * MySQL (mysql-cdc source) -> Kafka (upsert-kafka sink).
 * A print sink table is also registered for debugging (its insert is commented out).
 */
public class CdcSQLApp {

    /**
     * Entry point. Builds a pure-Flink-SQL change-data-capture pipeline:
     * a MySQL table is read via the {@code mysql-cdc} connector and its
     * changelog is written to Kafka via the {@code upsert-kafka} connector.
     * A {@code print} sink table is also registered for local debugging.
     *
     * @param args unused
     * @throws Exception if job submission or execution fails
     */
    public static void main(String[] args) throws Exception {
        // Pin the local web UI to a fixed port so it is reachable when running
        // on the embedded mini-cluster (default port is random).
        Configuration conf = new Configuration();
        conf.set(RestOptions.BIND_PORT, "8081");

        // 1. Create the streaming environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        // Parallelism left at the default; uncomment to force a single task.
        // env.setParallelism(1);
        // Enable exactly-once checkpointing every 5s; the CDC source relies on
        // checkpoints to commit binlog offsets, so this is required for correctness.
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);
        // Table environment layered on top of the streaming environment.
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

        // 2. Source table mapped onto MySQL via the mysql-cdc connector.
        /*
         * scan.startup.mode — startup mode of the MySQL CDC source:
         *   initial (default): take a snapshot of the monitored table on first
         *       start, then continue reading the latest binlog.
         *   earliest-offset:   skip the snapshot; read from the earliest
         *       available binlog offset.
         *   latest-offset:     skip the snapshot; read only from the end of the
         *       binlog, i.e. only changes made after the connector starts.
         *   specific-offset:   skip the snapshot; start from a given binlog
         *       file/position, or from a GTID set when GTID is enabled.
         *   timestamp:         skip the snapshot; start from binlog events at a
         *       given timestamp.
         */
        tenv.executeSql(
                "CREATE TABLE mysql_test01 (\n" +
                        "     id INT,\n" +
                        "     name STRING,\n" +
                        "     update_time BIGINT,\n" +
                        "     price DECIMAL(10, 5),\n" +
                        "     create_time TIMESTAMP(3),\n" +
                        "     PRIMARY KEY(id) NOT ENFORCED\n" +
                        ") WITH (\n" +
                        "     'connector' = 'mysql-cdc',\n" +
                        "     'hostname' = 'cdh-node',\n" +
                        "     'port' = '13306',\n" +
                        "     'username' = 'root',\n" +
                        // SECURITY NOTE(review): credentials are hard-coded in source;
                        // move them to configuration / environment before sharing or deploying.
                        "     'password' = 'Test_090110',\n" +
                       //  "     'scan.startup.mode' = 'latest-offset',\n" +
                        "     'database-name' = 'm1',\n" +
                        "     'table-name' = 'test01'\n" +
                        ")"
        );

        // 3. Debug sink: same schema, print connector (rows go to stdout of the task managers).
        tenv.executeSql(
                "CREATE TABLE sink_test01 (\n" +
                        "     id INT,\n" +
                        "     name STRING,\n" +
                        "     update_time BIGINT,\n" +
                        "     price DECIMAL(10, 5),\n" +
                        "     create_time TIMESTAMP(3),\n" +
                        "     PRIMARY KEY(id) NOT ENFORCED\n" +
                        ") WITH (\n" +
                        "     'connector' = 'print'\n" +
                        ")"
        );

        // 4. Kafka sink: upsert-kafka keys messages by the PRIMARY KEY, so the
        // CDC changelog (inserts/updates/deletes) is represented correctly.
        // NOTE(review): table name "sink_kakfa" is a typo for "sink_kafka";
        // kept as-is since definition and usage below are consistent.
        tenv.executeSql(
                "CREATE TABLE sink_kakfa (\n" +
                        "     id INT,\n" +
                        "     name STRING,\n" +
                        "     update_time BIGINT,\n" +
                        "     price DECIMAL(10, 5),\n" +
                        "     create_time TIMESTAMP(3),\n" +
                        "     PRIMARY KEY(id) NOT ENFORCED\n" +
                        ") WITH (\n" +
                        "  'connector' = 'upsert-kafka',\n" +
                        "  'topic' = 'sql_topic',\n" +
                        "  'properties.bootstrap.servers' = 'cdh-node:9092',\n" +
                        "  'key.format' = 'json',\n" +
                        "  'value.format' = 'json'\n" +
                        ")"
        );

        // Alternative debug pipeline: write the CDC stream to the print sink.
//        tenv.executeSql("insert into sink_test01 select * from mysql_test01").print();

        // 5. Submit the continuous INSERT job (MySQL CDC -> Kafka) and block on it.
        // executeSql() submits the job and returns immediately; without await()
        // main() would exit and, on a local mini-cluster, tear the job down
        // before it processes anything. For an unbounded source this blocks forever.
        tenv.executeSql("insert into sink_kakfa select * from mysql_test01").await();

        // Pure Flink SQL: no env.execute() needed — executeSql() submits the job itself.
        // Only when mixing Flink SQL with the DataStream API would env.execute() be required:
//        env.execute("cdc_sql_job");

    }
}
