package com.atguigu.app.dwd.db;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.Func.DwdTableProcessFunction;
import com.atguigu.Util.MyKafkaUtil;
import com.atguigu.bean.TableProcess;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * @author hjy
 * @create 2023/3/13 16:05
 */

/**
 * Pitfalls hit while developing this job:
 *      1. The HashMap state backend was not initialized, causing a NullPointerException.
 *      2. The CDC source had no deserializer configured, so no data was received.
 */
//Data flow: app/client -> mysql -> maxwell -> kafka(topic_db, ODS layer) -> BaseDBApp -> kafka(DWD layer)
//Processes:  mock -> mysql -> maxwell -> kafka(zk) -> BaseDBApp -> kafka(zk)
public class BaseDBApp {
    /**
     * Entry point of the DWD dimension/fact routing job.
     *
     * Pipeline:
     *  1. Consume raw change records (Maxwell JSON) from Kafka topic {@code topic_db}.
     *  2. Parse each record into a {@link JSONObject}, discarding dirty (non-JSON) data.
     *  3. Read the routing configuration table {@code gmall_config.table_process} via Flink CDC.
     *  4. Broadcast the configuration and connect it with the main stream.
     *  5. Filter/route the main stream in {@link DwdTableProcessFunction} using broadcast state.
     *  6. Write each record to its target Kafka topic via a dynamic Kafka producer.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to submit or execute
     */
    public static void main(String[] args) throws Exception {
        //todo 1 Create the execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
//        env.setParallelism(1);
//        env.enableCheckpointing(5000L);
//        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/gmall-flink/check");
//        env.getCheckpointConfig().setCheckpointTimeout(60000L);
//        env.setStateBackend(new HashMapStateBackend());
//        System.setProperty("HADOOP_USER_NAME", "atguigu");
        //todo 2 Consume the raw change stream from the Kafka topic_db topic
        String topic = "topic_db";
        String groupID = "BaseDBApp";
        DataStreamSource<String> kafkaDS = env.addSource(MyKafkaUtil.getFlinkKafkaConsumer(topic, groupID));
        //todo 3 Parse each record to a JSONObject, silently dropping dirty data (main stream)
        SingleOutputStreamOperator<JSONObject> jsonObjectDS = kafkaDS.flatMap(new FlatMapFunction<String, JSONObject>() {
            @Override
            public void flatMap(String value, Collector<JSONObject> out) throws Exception {
                if (value != null) {
                    try {
                        out.collect(JSON.parseObject(value));
                    } catch (Exception e) {
                        // Dirty data: a malformed record must not fail the whole job.
                        // Log and skip it instead of letting the parse exception propagate.
                        System.out.println("Discarding malformed record: " + value);
                    }
                }
            }
        });
        //todo 4 Read the routing configuration from MySQL via Flink CDC
        // NOTE(review): credentials are hard-coded; move them to external configuration.
        MySqlSource<String> mysql = MySqlSource.<String>builder()
                .hostname("hadoop102")
                .port(3306)
                .username("root")
                .password("123456")
                .databaseList("gmall_config")
                .tableList("gmall_config.table_process")
                // A deserializer is mandatory — without it the CDC source emits nothing.
                .deserializer(new JsonDebeziumDeserializationSchema())
                .startupOptions(StartupOptions.latest())
                .build();
        DataStreamSource<String> configDS = env.fromSource(mysql, WatermarkStrategy.noWatermarks(), "mysql");
        //todo 5 Turn the configuration stream into a broadcast stream
        MapStateDescriptor<String, TableProcess> mapStateDescriptor = new MapStateDescriptor<>("map-state", String.class, TableProcess.class);
        BroadcastStream<String> broadcast = configDS.broadcast(mapStateDescriptor);
        //todo 6 Connect the main stream with the broadcast stream
        BroadcastConnectedStream<JSONObject, String> connect = jsonObjectDS.connect(broadcast);
        //todo 7 Store broadcast records in broadcast state and use them to filter the main stream
        SingleOutputStreamOperator<JSONObject> processDS = connect.process(new DwdTableProcessFunction(mapStateDescriptor));
        //todo 8 Write the filtered records to their respective Kafka topics
        /**
         * Records must be routed to different Kafka topics, so the previously defined
         * fixed-topic KafkaSink does not apply; a dynamic KafkaSink is used instead,
         * which resolves the target topic per record.
         */
        processDS.addSink(MyKafkaUtil.getKafkaProducer());
        //todo 9 Launch the job
        env.execute();
    }
}
