package com.atguigu.education.app.dim;

import com.alibaba.fastjson.JSONException;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.education.app.func.MyBroadcastFunction;
import com.atguigu.education.app.func.MyPhoenixSink;
import com.atguigu.education.bean.TableProcess;
import com.atguigu.education.util.KafkaUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

/**
 * Dimension-layer sink job: reads changelog records from Kafka, filters out
 * dimension-table rows according to a config table streamed in via Flink CDC
 * (broadcast state), and writes the matching rows to Phoenix.
 */
public class DimSinkApp {
    public static void main(String[] args) throws Exception {
        //TODO 1. Obtain the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // In production, set parallelism to match the Kafka topic's partition count.
        env.setParallelism(1);

        //TODO 2. Checkpointing and state backend — required in production; disabled here
        // because the local test machine cannot afford the overhead.
        /*
        env.enableCheckpointing(5 * 60 * 1000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointTimeout(3 * 60 * 1000L);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/gmall/ck");
        System.setProperty("HADOOP_USER_NAME","atguigu");
        env.setStateBackend(new HashMapStateBackend());
         */

        //TODO 3. Read the changelog data from Kafka.
        String topic = "topic_db";
        String groupID = "dim_sink_app";
        DataStreamSource<String> streamSource = env.addSource(KafkaUtil.getKafkaConsumer(topic, groupID));
        //streamSource.print("db>>>>>>>");

        //TODO 4. Parse each record to JSON and divert dirty data to a side output.
        // Dirty data is: records that are not valid JSON, and bootstrap markers
        // (type "bootstrap-start" / "bootstrap-complete") which carry no row data.

        // Tag for the dirty-data side output (anonymous subclass keeps the generic type).
        OutputTag<String> dirtyOutputTag = new OutputTag<String>("Dirty") {
        };

        SingleOutputStreamOperator<JSONObject> jsonOutputStream = streamSource.process(new ProcessFunction<String, JSONObject>() {
            @Override
            public void processElement(String value, Context ctx, Collector<JSONObject> out) throws Exception {
                try {
                    JSONObject jsonObject = JSONObject.parseObject(value);
                    String type = jsonObject.getString("type");
                    if ("bootstrap-start".equals(type) || "bootstrap-complete".equals(type)) {
                        // Bootstrap markers are routed to the dirty side output.
                        ctx.output(dirtyOutputTag, value);
                    } else {
                        out.collect(jsonObject);
                    }
                } catch (JSONException e) {
                    // Not parseable as JSON — route to the dirty side output
                    // instead of failing the job.
                    ctx.output(dirtyOutputTag, value);
                }
            }
        });

        // Extract the dirty-data side output as a stream.
        // (getSideOutput already returns DataStream<String>; no cast needed.)
        DataStream<String> dirtyStream = jsonOutputStream.getSideOutput(dirtyOutputTag);
        //dirtyStream.print("dirty>>>>>");

        // TODO 5. Read the configuration table via Flink CDC.
        // NOTE(review): credentials are hardcoded in source — move to external
        // configuration / a secrets store before deploying.
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("hadoop102")
                .port(3306)
                .username("root")
                .password("group102")
                .databaseList("edu_config")
                // Pitfall: tableList entries must be qualified as "database.table".
                .tableList("edu_config.table_process")
                .deserializer(new JsonDebeziumDeserializationSchema()) // emit change events as JSON
                .startupOptions(StartupOptions.initial()) // snapshot existing rows, then stream changes
                .build();

        DataStreamSource<String> tableConfigStream = env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "table_config");

        // TODO 6. Broadcast the config stream and connect it with the main stream.
        // Key:   String (source table name) — decides whether a record belongs to a dimension table.
        // Value: TableProcess — carries what is needed to create the target table in Phoenix.

        // State descriptor shared by the broadcast side and the processing function.
        MapStateDescriptor<String, TableProcess> mapStateDescriptor = new MapStateDescriptor<>("table_process", String.class, TableProcess.class);

        // Turn the config stream into a broadcast stream.
        BroadcastStream<String> broadcastStream = tableConfigStream.broadcast(mapStateDescriptor);

        // Connect the main stream with the broadcast config stream.
        BroadcastConnectedStream<JSONObject, String> connectedStream = jsonOutputStream.connect(broadcastStream);

        // TODO 7. Process the connected stream: keep only dimension-table records
        // according to the broadcast configuration.
        SingleOutputStreamOperator<JSONObject> filterTableStream =
                connectedStream.process(new MyBroadcastFunction(mapStateDescriptor));

        //filterTableStream.print("filterTableStream>>>>>>>>>>>>>>>>>>>");

        //TODO 8. Write the dimension records to Phoenix.
        filterTableStream.addSink(new MyPhoenixSink());


        //TODO Execute the job (job name reuses the consumer group id).
        env.execute(groupID);
    }
}

