package com.intct.sql;

import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @author gufg
 * @since 2025-11-12 14:44
 */
public class Test01 {

    /**
     * Flink SQL job: reads CDC-style JSON records ({@code after}, {@code source},
     * {@code op}) from a Kafka topic and upserts them into a MySQL table through
     * the JDBC connector.
     *
     * @param args command-line arguments (unused)
     * @throws Exception if job submission fails or the job terminates exceptionally
     */
    public static void main(String[] args) throws Exception {
        // DataStream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Table API / SQL environment bridged onto the DataStream environment
        StreamTableEnvironment tenv = StreamTableEnvironment.create(env);

        // Parallelism 1 keeps local/debug output in a single ordered stream
        env.setParallelism(1);

        // Checkpoint every 5s so the Kafka source can commit offsets on checkpoint
        env.enableCheckpointing(5000);

        // Source table: maps the Kafka topic onto a three-column string schema.
        // NOTE(review): broker address, topic and group id are hard-coded —
        // consider externalizing them to configuration.
        tenv.executeSql("create table flink_table(" +
                "   after string" +
                "   , source string" +
                "   , op string" +
                ") with (" +
                // Kafka connector
                "  'connector' = 'kafka'" +
                // topic to consume
                "  ,'topic' = 'intct86'" +
                // broker host:port
                "  ,'properties.bootstrap.servers' = 'cdh-node:9092'" +
                // consumer group id
                "  ,'properties.group.id' = 'kafka_group_011'" +
                /*
                    scan.startup.mode controls where the Kafka consumer starts.
                    Valid values:
                        group-offsets:    committed offsets of the consumer group
                        earliest-offset:  earliest available offset
                        latest-offset:    latest offset
                        timestamp:        user-supplied timestamp per partition
                        specific-offsets: user-supplied offset per partition
                 */
                "  ,'scan.startup.mode'='earliest-offset'" +
                "  ,'format' = 'json'" +
                ")");

        // Sink table: JDBC mapping onto MySQL table m1.test07.
        // SECURITY(review): credentials are hard-coded in source — move them to
        // secure configuration (env vars / secret store) before committing.
        tenv.executeSql("CREATE TABLE MyTest07Table (" +
                "  id STRING," +
                "  name STRING," +
                "  PRIMARY KEY (id) NOT ENFORCED" +
                ") WITH (" +
                "   'connector' = 'jdbc'," +
                "   'url' = 'jdbc:mysql://cdh-node:13306/m1'," +
                "   'driver' = 'com.mysql.cj.jdbc.Driver'," +
                "   'username' = 'root'," +
                "   'password' = 'Test_090110'," +
                "   'table-name' = 'test07'" +
                ")");

        // Run the pipeline. executeSql(INSERT ...) submits the job asynchronously
        // and returns immediately; await() blocks so a local JVM (and its embedded
        // mini-cluster) stays alive while the streaming job runs instead of
        // exiting right after submission.
        // NOTE(review): mapping 'op' (operation flag) into the primary-key column
        // 'id' and 'after' into 'name' looks suspicious — confirm the intended
        // column mapping against the upstream CDC payload.
        tenv.executeSql("insert into MyTest07Table(id,name) select op, after from flink_table")
                .await();
    }
}
