package net.bwie.realtime.zh.dwd.log.job;

import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * @BelongsProject: realtime-project-10zlq
 * @BelongsPackage: net.bwie.realtime.zh.dwd.log.job
 * @Author: zhangleqing
 * @CreateTime: 2025-08-21  17:13
 * @Description: DWD-layer job that ingests the entrance-guard and vehicle (cars)
 *               operation logs from MySQL via Flink CDC; a Kafka JSON sink for
 *               the DWD topics is planned but not yet enabled.
 * @Version: 1.0
 */
public class DwdCarsLog {

    /**
     * Entry point: builds the streaming/Table environments, registers the two
     * MySQL-CDC source tables (entrance guard log, cars log) and runs a small
     * preview query as a smoke test.
     *
     * @param args unused command-line arguments
     * @throws Exception propagated from Flink environment/SQL execution
     */
    public static void main(String[] args) throws Exception {
        // Streaming execution environment; parallelism 1 keeps the preview
        // output ordered and is fine for a single-instance CDC source.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

        // Table API environment layered on top of the streaming environment.
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // Checkpoint every 50s with exactly-once semantics — required by the
        // mysql-cdc connector to commit binlog offsets consistently.
        env.enableCheckpointing(50000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);

        // Source table 1: entrance guard (door access) log via MySQL CDC.
        // BUGFIX: the DDL previously embedded a Java-style "// ..." comment
        // inside the SQL string; Flink SQL only accepts "--" comments, so the
        // statement failed to parse. The stray comment has been removed.
        tEnv.executeSql("CREATE TABLE ods_entrance_guard_log (\n" +
                " id STRING,\n" +
                " openMode INT,\n" +
                " openTime STRING,\n" +
                " ownerId INT,\n" +
                " PRIMARY KEY (id) NOT ENFORCED\n" +
                ") WITH (\n" +
                " 'connector' = 'mysql-cdc',\n" +
                " 'hostname' = 'node101',\n" +
                " 'port' = '3306',\n" +
                " 'username' = 'root',\n" +
                " 'password' = '123456',\n" +
                " 'database-name' = 'sca',\n" +
                " 'table-name' = 'entrance_guard_log'\n" +
                ")\n");
        // Debug preview (disabled):
        // tEnv.executeSql("SELECT * FROM ods_entrance_guard_log LIMIT 10").print();

        // Source table 2: vehicle operation log via MySQL CDC.
        // Same BUGFIX as above: removed the illegal "//" comment from the DDL.
        // NOTE(review): 'table-name' = 'cars_Log' has a capital L — verify this
        // matches the actual MySQL table name (MySQL table names can be
        // case-sensitive on Linux hosts).
        tEnv.executeSql("CREATE TABLE ods_cars_log (\n" +
                " id INT,\n" +
                " opTime STRING,\n" +
                " ctype INT,\n" +
                " carCode STRING,\n" +
                " cId INT,\n" +
                " PRIMARY KEY (id) NOT ENFORCED\n" +
                ") WITH (\n" +
                " 'connector' = 'mysql-cdc',\n" +
                " 'hostname' = 'node101',\n" +
                " 'port' = '3306',\n" +
                " 'username' = 'root',\n" +
                " 'password' = '123456',\n" +
                " 'database-name' = 'sca',\n" +
                " 'table-name' = 'cars_Log'\n" +
                ")\n");

        // Smoke test: the original call discarded the TableResult, so the
        // query produced no visible effect. print() materializes the rows and
        // blocks until the LIMIT is satisfied.
        tEnv.executeSql("SELECT * FROM ods_cars_log LIMIT 10").print();

        // TODO(next step): define Kafka JSON sink tables
        // (dwd_entrance_guard_log / dwd_cars_log on node101:9092, 'format' =
        // 'json') and INSERT INTO them from the two CDC source tables; the
        // previous commented-out draft also carried the illegal "//" SQL
        // comments and must use "--" (or no inline comments) when restored.
        // Note: tEnv.executeSql(INSERT ...) submits its own job, so a separate
        // env.execute(...) is not needed for pure Table API pipelines.
    }
}
