package com.atguigu.edu.realtime.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.edu.realtime.app.dim.func.DimSinkFunction;
import com.atguigu.edu.realtime.app.dim.func.TableProcessFunction;
import com.atguigu.edu.realtime.bean.TableProcess;
import com.atguigu.edu.realtime.util.MyKafkaUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;
import org.apache.flink.util.Collector;

/**
 * DIM-layer application: routes dimension-table change records from the
 * {@code topic_db} Kafka topic into HBase, driven by a dynamic configuration
 * table ({@code table_process}) captured from MySQL via Flink CDC and
 * distributed to all subtasks through broadcast state.
 */
public class DimApp {
    public static void main(String[] args) throws Exception {
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);

        // 1. Read the raw change-log stream from the topic_db Kafka topic.
        String topic = "topic_db";
        // Consumer group id conventionally matches the class name (DimApp).
        String groupID = "dim_app_group";

        FlinkKafkaConsumer<String> kafkaConsumer = MyKafkaUtil.getKafkaConsumer(topic, groupID);
        DataStreamSource<String> kafkaDStream = env.addSource(kafkaConsumer);

        // Parse each message to JSON, then keep only records from the "edu" database.
        // NOTE: JSON.parseObject will throw on malformed input; dirty data is assumed
        // to have been handled upstream — TODO confirm.
        SingleOutputStreamOperator<JSONObject> jsonObjDstream =
                kafkaDStream.map(jsonstring -> JSON.parseObject(jsonstring));
        // Null-safe comparison: a record without a "database" field must be filtered
        // out, not crash the job with an NPE.
        SingleOutputStreamOperator<JSONObject> edujsonObjDstream =
                jsonObjDstream.filter(jsonObj -> "edu".equals(jsonObj.getString("database")));
        //kafkaDStream.print();

        // 2. Build the configuration stream from the MySQL dimension-config table via CDC.
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("hadoop102") // MySQL server host
                .port(3306)            // MySQL port
                .databaseList("edu_realtime_config") // database to capture
                .tableList("edu_realtime_config.table_process") // table to capture
                .username("root")
                .password("000000")
                // Deserialize captured change events into JSON strings.
                .deserializer(new JsonDebeziumDeserializationSchema())
                // initial(): take a full snapshot first (via JDBC), then follow the binlog.
                .startupOptions(StartupOptions.initial())
                .build();

        // Parallelism 1: the config table is tiny and ordering of config changes matters.
        DataStreamSource<String> tableProcessDataStream =
                env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "table_process").setParallelism(1);
        tableProcessDataStream.print("cdc 获得数据:"); // prefix shown before each printed record

        // 3. Broadcast the configuration stream.
        // 3.1 State descriptor: maps source table name -> its TableProcess config entry.
        MapStateDescriptor<String, TableProcess> tableProcessMapStateDescriptor =
                new MapStateDescriptor<String, TableProcess>("table_process_state", String.class, TableProcess.class);
        // 3.2 Turn the config stream into a broadcast stream backed by that state.
        BroadcastStream<String> tableProcessBroadCastDStream =
                tableProcessDataStream.broadcast(tableProcessMapStateDescriptor);
        // 3.3 Connect the main (data) stream with the broadcast (config) stream.
        BroadcastConnectedStream<JSONObject, String> broadcastConnectedStream =
                edujsonObjDstream.connect(tableProcessBroadCastDStream);

        // Type params: 1. main-stream element, 2. broadcast element, 3. output element.
        SingleOutputStreamOperator<JSONObject> dimJsonObjDStream =
                broadcastConnectedStream.process(new TableProcessFunction(tableProcessMapStateDescriptor));

        dimJsonObjDStream.print("维度数据 ：");
        // Persist matched dimension records (custom sink writes to HBase).
        dimJsonObjDStream.addSink(new DimSinkFunction());
        env.execute();
    }
}
