package com.intct.ods;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.intct.common.Constant;
import com.intct.util.KafkaUtil;
import com.intct.util.MysqlUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.configuration.RestOptions;
import org.apache.flink.connector.base.DeliveryGuarantee;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.kafka.clients.producer.ProducerConfig;

/**
 * @author gufg
 * @since 2025-07-02 15:38
 */
public class OdsApplication {
    public static void main(String[] args) throws Exception {
        // Flink configuration: pin the local web UI / REST port.
        Configuration conf = new Configuration();
        conf.set(RestOptions.BIND_PORT, "8081");

        // Create the streaming environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(conf);
        env.setParallelism(1);

        // Enable checkpointing every 5s with exactly-once semantics
        // (required for the transactional Kafka sink).
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);

        // Read from MySQL (CDC) and write to Kafka.
        // Use StartupOptions.initial() for a full snapshot + binlog run:
//        mysqlToKafka(env, StartupOptions.initial(), "tid_initial", Constant.KAFKA_ODS_TOPIC_NAME);

        mysqlToKafka(env, StartupOptions.latest(), "tid_latest", Constant.KAFKA_ODS_TOPIC_NAME);

        // Submit the job.
        env.execute("ods_application_exec");
    }

    /**
     * Reads CDC change records from MySQL, flattens each record (moves the
     * table name from {@code source.table} to a top-level {@code table} key,
     * drops {@code source}/{@code transaction}) and writes the result to Kafka.
     *
     * @param env                   streaming execution environment the operators are added to
     * @param startupOptions        CDC startup mode (e.g. {@code initial()} snapshot or {@code latest()})
     * @param transactionalIdPrefix Kafka transactional-id prefix; must be unique per startup mode
     *                              so exactly-once producers do not collide
     * @param topic                 destination Kafka topic
     */
    private static void mysqlToKafka(StreamExecutionEnvironment env,
                                     StartupOptions startupOptions, String transactionalIdPrefix, String topic) {
        // Build the MySQL CDC source (connection details live in MysqlUtil).
        MySqlSource<String> mySqlSource = MysqlUtil.getMysqlSource(startupOptions);

        // driver_info  10   snapshot -- full load   kafka/hbase/mysql
        // rowkey + column family + qualifier ==> put (update)

        // Ingest the CDC stream; CDC records carry their own ordering, so no watermarks.
        DataStreamSource<String> sourceDS = env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "ods-mysql-sources" + topic);

        // Transform: flatten the Debezium envelope. Returns null for records
        // without a "source" block (e.g. malformed payloads) instead of
        // throwing, so a single bad record cannot fail the whole job; the
        // downstream filter drops those nulls.
        SingleOutputStreamOperator<String> mapDS = sourceDS.map((MapFunction<String, String>) jsonStr -> {
            JSONObject jsonObj = JSON.parseObject(jsonStr);
            if (jsonObj == null) {
                return null;
            }

            JSONObject source = jsonObj.getJSONObject("source");
            if (source == null) {
                // No "source" block (heartbeat/malformed record) -- drop downstream.
                return null;
            }

            // Promote the table name to a top-level key.
            String tableName = source.getString("table");

            // Strip metadata we do not forward.
            jsonObj.remove("source");
            jsonObj.remove("transaction");

            jsonObj.put("table", tableName);

            // TODO data encryption

            // TODO .......

            return JSON.toJSONString(jsonObj);
        });

        // Drop records the transform rejected.
        SingleOutputStreamOperator<String> filterDS = mapDS.filter(f -> f != null);

        // Sink to Kafka with an exactly-once transactional producer.
        filterDS.sinkTo(KafkaUtil.getKafkaSink(topic, transactionalIdPrefix));

    }
}
