package org.example;

import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.table.api.TableResult;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;

/**
 * Flink streaming job: consumes CDC-style JSON messages from a Kafka topic,
 * parses them with custom JSON UDFs, fans the embedded {@code data} array out
 * into per-row records, and streams user and order records into two Hudi
 * MERGE_ON_READ tables, carrying a soft-delete flag ({@code is_deleted}) for
 * DELETE operations instead of physically removing rows.
 */
public class Cdc2HudiMultiMORTableJob_DELETE {

    /**
     * Builds the pipeline and submits it: environment setup, checkpointing,
     * UDF registration, DDL for source/views/sinks, then a single
     * {@code EXECUTE STATEMENT SET} that starts both INSERT jobs.
     *
     * @param args unused command-line arguments
     * @throws Exception if any Flink SQL statement fails to submit
     */
    public static void main(String[] args) throws Exception {
        // 1. Create the execution environment; expose the Flink web UI / REST
        //    endpoint on port 8081 for local debugging.
        Configuration conf = new Configuration();
        conf.setInteger(RestOptions.PORT, 8081);

        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        // Parallelism 1 keeps file-based Hudi output and checkpoints simple locally.
        env.setParallelism(1);

        StreamTableEnvironment tableEnv = StreamTableEnvironment.create(env);

        // 2. Checkpointing is required for the Hudi writer to commit; configure it.
        configureCheckpointing(env);

        // 3. Register the custom JSON UDFs used by the parsing views below.
        System.out.println("注册自定义UDF函数");
        tableEnv.createTemporaryFunction("JSON_TO_MAP", JsonToMapFunction.class);
        tableEnv.createTemporaryFunction("JSON_PATH_EXTRACT", JsonPathExtractor.class);
        tableEnv.createTemporaryFunction("JSON_ARRAY_TABLE", JsonArrayFunction.class);

        // 4. Kafka source table exposing the raw value plus connector metadata columns.
        executeAndLog(tableEnv, kafkaSourceDdl());

        // 5. Base view: parse the whole JSON payload into a map and pull out the
        //    common CDC envelope fields (database, table, type, ts, data).
        executeAndLog(tableEnv, baseCdcViewDdl());

        // 6. Expand the JSON `data` array into one row per element.
        executeAndLog(tableEnv, dataArrayViewDdl());

        // 7. Flat CDC view over the expanded rows.
        executeAndLog(tableEnv, cdcViewDdl());

        // 8. Per-table projection: users (ucenter.t_user), with soft-delete flag.
        executeAndLog(tableEnv, userDataViewDdl());

        // 9. Per-table projection: orders (ucenter.t_user_order), with soft-delete flag.
        executeAndLog(tableEnv, orderDataViewDdl());

        // 10. Hudi MOR sink for users.
        executeAndLog(tableEnv, hudiUserTableDdl());

        // 11. Hudi MOR sink for orders.
        executeAndLog(tableEnv, hudiOrderTableDdl());

        // 12. Submit both INSERTs atomically as one job via EXECUTE STATEMENT SET.
        TableResult result = executeAndLog(tableEnv, insertStatementSet());

        // 13. Print the submission result (blocks on job status output).
        result.print();
    }

    /** Prints the statement (for traceability) and submits it to the table environment. */
    private static TableResult executeAndLog(StreamTableEnvironment tableEnv, String sql) {
        System.out.println("执行SQL: \n" + sql);
        return tableEnv.executeSql(sql);
    }

    /**
     * Checkpoint every 15s to local filesystem; retain externalized checkpoints
     * on cancellation so the job can be restarted from them.
     */
    private static void configureCheckpointing(StreamExecutionEnvironment env) {
        env.enableCheckpointing(15000);
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointTimeout(600000);
        checkpointConfig.setMinPauseBetweenCheckpoints(500);
        checkpointConfig.setMaxConcurrentCheckpoints(1);
        checkpointConfig.setTolerableCheckpointFailureNumber(3);
        checkpointConfig.setExternalizedCheckpointCleanup(
            CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        checkpointConfig.setCheckpointStorage("file:///tmp/hudi/checkpoint");
    }

    /** DDL: Kafka source table with raw value payload and virtual metadata columns. */
    private static String kafkaSourceDdl() {
        return "CREATE TABLE kafka_source (\n" +
                "  `value` STRING,\n" +
                "  `topic` STRING METADATA FROM 'topic' VIRTUAL,\n" +
                "  `partition` INT METADATA FROM 'partition' VIRTUAL,\n" +
                "  `headers` MAP<STRING, BYTES> METADATA FROM 'headers' VIRTUAL,\n" +
                "  `leader-epoch` INT METADATA FROM 'leader-epoch' VIRTUAL,\n" +
                "  `offset` BIGINT METADATA FROM 'offset' VIRTUAL,\n" +
                "  `timestamp` TIMESTAMP_LTZ(3) METADATA FROM 'timestamp' VIRTUAL,\n" +
                "  `timestamp-type` STRING METADATA FROM 'timestamp-type' VIRTUAL\n" +
                ") WITH (\n" +
                "  'connector' = 'kafka',\n" +
                "  'topic' = 'ucenter_demo',\n" +
                "  'properties.bootstrap.servers' = '10.94.162.31:9092',\n" +
                "  'properties.group.id' = 'group1',\n" +
                "  'scan.startup.mode' = 'earliest-offset',\n" +
                "  'properties.enable.auto.commit' = 'false',\n" +
                "  'format' = 'raw',\n" +
                "  'raw.charset' = 'UTF-8'\n" +
                ")";
    }

    /** DDL: view that parses the JSON payload once and extracts the CDC envelope fields. */
    private static String baseCdcViewDdl() {
        return "CREATE VIEW base_cdc_view AS \n" +
                "SELECT \n" +
                "  `value` as json_data,\n" +
                "  full_json_map['database'] AS db_name, \n" +
                "  full_json_map['table'] AS table_name, \n" +
                "  full_json_map['type'] AS op_type, \n" +
                "  CAST(full_json_map['ts'] AS BIGINT) AS ts, \n" +
                "  full_json_map['data'] AS data_json,\n" +
                "  full_json_map AS full_json_map,\n" +
                "  `topic`,\n" +
                "  `partition`,\n" +
                "  `offset`,\n" +
                "  `timestamp`\n" +
                "FROM (\n" +
                "  SELECT \n" +
                "    *,\n" +
                "    JSON_TO_MAP(`value`) AS full_json_map\n" +
                "  FROM kafka_source\n" +
                ")";
    }

    /** DDL: view that explodes the CDC `data` JSON array into (index, element) rows. */
    private static String dataArrayViewDdl() {
        return "CREATE VIEW data_array_view AS \n" +
                "SELECT \n" +
                "  base.*,\n" +
                "  data_idx AS data_index,\n" +
                "  JSON_TO_MAP(data_element) AS data_map\n" +
                "FROM base_cdc_view base, \n" +
                "LATERAL TABLE(JSON_ARRAY_TABLE(data_json)) AS t(data_idx, data_element)";
    }

    /** DDL: flat CDC view combining envelope fields with each parsed data element. */
    private static String cdcViewDdl() {
        return "CREATE VIEW cdc_view AS \n" +
                "SELECT \n" +
                "  db_name, \n" +
                "  table_name, \n" +
                "  op_type, \n" +
                "  ts, \n" +
                "  data_index,\n" +
                "  json_data AS raw_data, \n" +
                "  data_map,\n" +
                "  full_json_map \n" +
                "FROM data_array_view";
    }

    /** DDL: user projection (ucenter.t_user) with DELETE mapped to a soft-delete flag. */
    private static String userDataViewDdl() {
        return "CREATE VIEW user_data_view AS \n" +
                "SELECT \n" +
                "  data_map['id'] AS id, \n" +
                "  data_map['name'] AS name, \n" +
                "  CAST(data_map['age'] AS INT) AS age, \n" +
                "  data_map['dt'] AS dt, \n" +
                "  ts,\n" +
                "  CASE WHEN op_type = 'DELETE' THEN TRUE ELSE FALSE END AS is_deleted\n" +
                "FROM cdc_view \n" +
                "WHERE db_name = 'ucenter' \n" +
                "  AND table_name = 't_user'";
    }

    /** DDL: order projection (ucenter.t_user_order) with DELETE mapped to a soft-delete flag. */
    private static String orderDataViewDdl() {
        return "CREATE VIEW order_data_view AS \n" +
                "SELECT \n" +
                "  data_map['order_id'] AS order_id, \n" +
                "  data_map['user_id'] AS user_id, \n" +
                "  data_map['amount'] AS amount, \n" +
                "  data_map['dt'] AS dt, \n" +
                "  ts,\n" +
                "  CASE WHEN op_type = 'DELETE' THEN TRUE ELSE FALSE END AS is_deleted\n" +
                "FROM cdc_view \n" +
                "WHERE db_name = 'ucenter' \n" +
                "  AND table_name = 't_user_order'";
    }

    /** DDL: Hudi MERGE_ON_READ sink for user rows (ts as precombine field). */
    private static String hudiUserTableDdl() {
        return "CREATE TABLE hudi_user_table_mor (\n" +
                "  id STRING PRIMARY KEY NOT ENFORCED,\n" +
                "  name STRING,\n" +
                "  age INT,\n" +
                "  dt STRING,\n" +
                "  ts BIGINT,\n" +
                "  is_deleted BOOLEAN\n" +
                ") WITH (\n" +
                "  'connector' = 'hudi',\n" +
                "  'path' = 'file:///tmp/hudi/user_info_mor',\n" +
                "  'table.type' = 'MERGE_ON_READ',\n" +
                "  'precombine.field' = 'ts',\n" +
                "  'write.tasks' = '1',\n" +
                "  'compaction.tasks' = '1'\n" +
                ")";
    }

    /** DDL: Hudi MERGE_ON_READ sink for order rows (ts as precombine field). */
    private static String hudiOrderTableDdl() {
        return "CREATE TABLE hudi_order_table_mor (\n" +
                "  order_id STRING PRIMARY KEY NOT ENFORCED,\n" +
                "  user_id STRING,\n" +
                "  amount STRING,\n" +
                "  dt STRING,\n" +
                "  ts BIGINT,\n" +
                "  is_deleted BOOLEAN\n" +
                ") WITH (\n" +
                "  'connector' = 'hudi',\n" +
                "  'path' = 'file:///tmp/hudi/order_info_mor',\n" +
                "  'table.type' = 'MERGE_ON_READ',\n" +
                "  'precombine.field' = 'ts',\n" +
                "  'write.tasks' = '1',\n" +
                "  'compaction.tasks' = '1'\n" +
                ")";
    }

    /** DML: both INSERTs submitted together as one Flink job via a statement set. */
    private static String insertStatementSet() {
        return "EXECUTE STATEMENT SET\n" +
            "BEGIN\n" +
            "\n" +
            "  INSERT INTO hudi_user_table_mor\n" +
            "  SELECT id, name, age, dt, ts, is_deleted\n" +
            "  FROM user_data_view;\n" +
            "\n" +
            "  INSERT INTO hudi_order_table_mor\n" +
            "  SELECT order_id, user_id, amount, dt, ts, is_deleted\n" +
            "  FROM order_data_view;\n" +
            "\n" +
            "END";
    }
}