package com.zhu.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.ververica.cdc.connectors.mysql.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.DebeziumSourceFunction;
import com.zhu.config.JDBCConfig;
import com.zhu.function.DIMSinkFunction;
import com.zhu.serialization.CDCJsonDeserializationSchema;
import com.zhu.function.DIMTableProcessFunction;
import com.zhu.bean.TableProcess;
import com.zhu.config.ClusterParametersConfig;
import com.zhu.utils.ZhuKafkaUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.util.Collector;

/**
 * DimApp — dimension-data diversion job.
 *
 * <p>Data path: Web -> Nginx -> business servers -> MySQL (binlog) -> Maxwell -> Kafka ODS topic
 * -> this Flink app -> Phoenix (HBase).
 * Components involved: Web, MySQL, Maxwell, Kafka (ZooKeeper), DimApp, Phoenix (HBase/Hadoop).
 */
public class DimDataDiversionApp {

    /**
     * Returns {@code true} for the Maxwell operation types this job must keep:
     * {@code insert}, {@code update} and {@code bootstrap-insert} (initial full load).
     *
     * @param maxwellDataType value of the Maxwell {@code "type"} field; may be {@code null}
     * @return whether the record should be forwarded downstream
     */
    private static boolean isRetainedMaxwellType(String maxwellDataType) {
        return "insert".equals(maxwellDataType)
                || "update".equals(maxwellDataType)
                || "bootstrap-insert".equals(maxwellDataType);
    }

    /**
     * Builds and runs the dimension-diversion pipeline:
     * Kafka ODS topic -> JSON filter -> connect with a broadcast FlinkCDC config stream
     * -> route dimension rows -> Phoenix sink.
     *
     * @param args unused command-line arguments
     * @throws Exception if the Flink job fails to submit or execute
     */
    public static void main(String[] args) throws Exception {
        //TODO 1. Stream execution environment
        StreamExecutionEnvironment streamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment();
        // Test setting; in production set this to the Kafka topic's partition count.
        streamExecutionEnvironment.setParallelism(1);

        // Checkpointing — disabled for local testing; enable in production:
        /*
        streamExecutionEnvironment.enableCheckpointing(5 * 60000L, CheckpointingMode.EXACTLY_ONCE); // exactly-once

        // State backend
        streamExecutionEnvironment.setStateBackend(new HashMapStateBackend());
        streamExecutionEnvironment.getCheckpointConfig().setCheckpointStorage(ClusterParametersConfig.HDFS_CHECKPOINT_FILE_DIR);  // store checkpoints on HDFS
        System.setProperty("HADOOP_USER_NAME", "zhu");
        streamExecutionEnvironment.getCheckpointConfig().setCheckpointTimeout(10 * 60000L);  // checkpoint timeout
        streamExecutionEnvironment.getCheckpointConfig().setMaxConcurrentCheckpoints(2);  // max concurrent checkpoints
        streamExecutionEnvironment.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 5 * 1000L));  // restart strategy

         */
        //TODO 2. Read the Kafka topic_db data
        String kafkaTopic = ClusterParametersConfig.KAFKA_BASE_DB_TOPIC;
        String groupId = "dim_app_zhu_2023";
        DataStreamSource<String> kafkaDStream =
                streamExecutionEnvironment.addSource(ZhuKafkaUtil.getFlinkKafkaConsumer(kafkaTopic, groupId));

        //TODO 3. Drop non-JSON records; keep insert / update / bootstrap-insert events
        SingleOutputStreamOperator<JSONObject> filterJSONObjectDStream =
                kafkaDStream.flatMap(new FlatMapFunction<String, JSONObject>() {
                    @Override
                    public void flatMap(String value, Collector<JSONObject> collector) throws Exception {
                        try {
                            JSONObject jsonObject = JSON.parseObject(value);
                            // Forward only new, changed, or initial-load records.
                            if (isRetainedMaxwellType(jsonObject.getString("type"))) {
                                collector.collect(jsonObject);
                            }
                        } catch (Exception e) {
                            // Dirty data; could be routed to a side output to measure the dirty-data rate.
                            System.out.println("error data" + value);
                            e.printStackTrace();
                        }
                    }
                });

        //TODO 4. FlinkCDC: read the MySQL configuration table as the config stream
        DebeziumSourceFunction<String> mySqlSource = MySqlSource
                .<String>builder()
                .hostname(JDBCConfig.MYSQL_HOST_NAME)
                .port(JDBCConfig.MYSQL_CONNECT_PORT)
                .username(JDBCConfig.MYSQL_JDBC_MYSQL_CONNECT_USER)
                .password(JDBCConfig.MYSQL_JDBC_CONNECT_USER_PASSWORD)
                .databaseList(JDBCConfig.MYSQL_BINLOG_ENABLE_DATABASE) // varargs; several databases may be listed
                .tableList(JDBCConfig.MYSQL_BINLOG_ENABLE_TABLE) // qualified as "database.table"
                .deserializer(new CDCJsonDeserializationSchema()) // custom deserializer
                .startupOptions(StartupOptions.initial()).build(); // initial: snapshot existing rows first, then tail the binlog
        // Other startup options:
        //   earliest       - read the binlog from its beginning (binlog must predate the database)
        //   latest         - read only newly-arriving binlog entries
        //   specificOffset - resume from a given binlog offset
        //   timestamp      - resume from a given timestamp

        // Configuration stream (no event time needed, hence no watermarks).
        SingleOutputStreamOperator<String> mysqlSourceDStream =
                streamExecutionEnvironment.addSource(mySqlSource, "tableProcessSource")
                        .assignTimestampsAndWatermarks(WatermarkStrategy.noWatermarks());


        //TODO 5. Turn the configuration stream into a broadcast stream

        MapStateDescriptor<String, TableProcess> mapStateDescriptor = new MapStateDescriptor<>("map-state", String.class, TableProcess.class);
        BroadcastStream<String> broadcastStream = mysqlSourceDStream.broadcast(mapStateDescriptor);

        //TODO 6. Connect the main stream with the broadcast stream
        BroadcastConnectedStream<JSONObject, String> connectedStream = filterJSONObjectDStream.connect(broadcastStream);

        //TODO 7. Process the connected stream (broadcast-state reads/writes);
        // keeps only main-stream rows belonging to the configured dimension tables
        SingleOutputStreamOperator<JSONObject> processedDIMDStream = connectedStream.process(new DIMTableProcessFunction(mapStateDescriptor));


        //TODO 8. Write the dimension rows to Phoenix.
        // A plain JDBC sink needs the SQL and its placeholders up front, but each dimension table
        // has a different column set, so a custom SinkFunction builds the upsert per record.
        processedDIMDStream.print(">>>>>>>>>>>>>>");
        processedDIMDStream.addSink(new DIMSinkFunction());

        //TODO 9. Execute
        streamExecutionEnvironment.execute("DimApp");

    }
}




