package com.atguigu.edu.realtime.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.edu.realtime.app.func.MyBroadcastFunction;
import com.atguigu.edu.realtime.app.func.MyPhoenixSink;
import com.atguigu.edu.realtime.bean.TableProcess;
import com.atguigu.edu.realtime.util.KafkaUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.source.MySqlSourceBuilder;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

/**
 * @author Lec
 * @date 2022/9/4 10:13
 */


/**
 * Dimension-layer sink job.
 *
 * <p>Pipeline: Kafka ({@code topic_db}) -> clean/filter JSON -> connect with a
 * broadcast stream of dimension-table configuration read via MySQL CDC
 * ({@code edu_config.table_process}) -> write matching dimension rows to Phoenix.
 */
public class DimSinkApp {
    public static void main(String[] args) throws Exception {
        // TODO 1. Obtain the stream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // Set parallelism (1 for local development; raise in production)
        env.setParallelism(1);

        // TODO 2. Checkpointing and state backend (resource-heavy, so disabled
        // here for local testing; MUST be enabled in production)
//
//        env.enableCheckpointing(5 * 60 * 1000L, CheckpointingMode.EXACTLY_ONCE);
//        env.getCheckpointConfig().setCheckpointTimeout(3*60*1000L);
//        env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);
//        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/gmall/ck");
//        System.setProperty("HADOOP_USER_NAME", "atguigu");
//        env.setStateBackend(new HashMapStateBackend());

        // TODO 3. Read the raw change-log data from Kafka
        String topic = "topic_db";
        String groupID = "dim_sink_app";
        DataStreamSource<String> topicDbStream = env.addSource(KafkaUtil.getKafkaConsumer(topic, groupID));

//        topicDbStream.print("topic_db------>");

        // TODO 4. Parse and clean the data.
        // "Dirty" data is anything that is not valid JSON, plus Maxwell's
        // bootstrap markers (type == bootstrap-start / bootstrap-complete).
        // A single process() handles parse + filter and routes dirty records
        // to a side output instead of dropping them silently.

        // Anonymous subclass so the OutputTag's <String> type argument is
        // captured and not erased (required by Flink's type extraction).
        OutputTag<String> dirtyOutputTag = new OutputTag<String>("Dirty") {
        };

        SingleOutputStreamOperator<JSONObject> jsonObjStream = topicDbStream.process(new ProcessFunction<String, JSONObject>() {

            @Override
            public void processElement(String value, Context ctx, Collector<JSONObject> out) throws Exception {
                try {
                    JSONObject jsonObject = JSON.parseObject(value);

                    String type = jsonObject.getString("type");

                    if (!("bootstrap-start".equals(type) || "bootstrap-complete".equals(type))) {
                        out.collect(jsonObject);
                    } else {
                        // Bootstrap marker records carry no payload -> side output
                        ctx.output(dirtyOutputTag, value);
                    }

                } catch (Exception e) {
                    // Not parseable as JSON -> route raw record to the dirty stream
                    ctx.output(dirtyOutputTag, value);
                    e.printStackTrace();
                }
            }
        });

        // Dirty-record stream (kept for inspection / auditing)
        DataStream<String> dirtyStream = jsonObjStream.getSideOutput(dirtyOutputTag);

//        dirtyStream.print("drity--------->");

        // TODO 5. Read the dimension-table configuration with Flink CDC
        MySqlSource<String> mySqlSource = new MySqlSourceBuilder<String>()
                .hostname("hadoop102")
                .port(3306)
                .databaseList("edu_config")
                .tableList("edu_config.table_process")
                .username("root")
                .password("123456")

                // Easy to forget: output format and startup mode
                .deserializer(new JsonDebeziumDeserializationSchema()) // emit records as JSON strings
                .startupOptions(StartupOptions.initial()) // snapshot existing rows, then stream changes
                .build();

        DataStreamSource<String> tableConfigStream = env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "tableConfig");

        //tableConfigStream.print("cdc---->");

        // TODO 6. Broadcast the configuration stream and connect it to the main stream
        MapStateDescriptor<String, TableProcess> descriptor = new MapStateDescriptor<>("table_process", String.class, TableProcess.class);
        BroadcastStream<String> broadcastStream = tableConfigStream.broadcast(descriptor);

        BroadcastConnectedStream<JSONObject, String> connectedStream = jsonObjStream.connect(broadcastStream);

        // TODO 7. Process the connected stream: use the config to keep only dimension-table rows.
        /* Config stream handling:
           1) If the config op is a delete, remove the table from broadcast state.
           2) Create the target Phoenix table if it does not exist yet.
           3) Store the config row in broadcast state.
           Main stream handling:
           1) Read the "table" field; drop the record if it is not in state (not a dimension table).
           2) Strip fields not listed in the config's sink_columns.
           3) Attach the sink_table field. */
        SingleOutputStreamOperator<JSONObject> streamOperator = connectedStream.process(new MyBroadcastFunction(descriptor));

        //streamOperator.print("filterTable------>");

        // TODO 8. Write the dimension data to Phoenix
        streamOperator.addSink(new MyPhoenixSink());

        // TODO Execute the job
        env.execute(groupID);
    }
}
