package com.bw.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.ververica.cdc.connectors.mysql.MySQLSource;
import com.alibaba.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction;
import com.bw.app.functions.CustomDebezium;
import com.bw.app.functions.DimFunction;
import com.bw.app.functions.MyPartitioner;
import com.bw.app.functions.TableProcessFunction;
import com.bw.bean.TableProcess;
import com.bw.utils.MyKafkaUtil;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.functions.RichMapFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.sink.SinkFunction;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.streaming.connectors.kafka.partitioner.FlinkKafkaPartitioner;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;

public class BaseDbApp {
    // Data flow:
    //   App/Web ---> nginx ---> SpringBoot ---> MySQL (business) ---> FlinkCDC ---> Kafka (ods)
    //           ---> Flink ---> Kafka (fact tables) / HBase (dimension tables)

    // This program: mock ---> MySQL (business) --FlinkCDC--> Kafka (ods) ---> BaseDbApp ---> Kafka (facts) / HBase (dims)

    /**
     * Entry point: splits the ODS business-data stream into Kafka fact-table topics
     * and HBase dimension tables, driven by a broadcast configuration table read via CDC.
     */
    public static void main(String[] args) throws Exception {
        // 1. Create the stream execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        // Checkpointing is disabled for local testing; re-enable before production.
        // (Create the checkpoint directory first.)
//        env.setStateBackend(new FsStateBackend("hdfs://hadoop102:9820/gmall-flink/ck"));
//        // checkpoint every 5 seconds
//        env.enableCheckpointing(5000L);
//        // exactly-once processing semantics
//        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
//        // checkpoint timeout
//        env.getCheckpointConfig().setCheckpointTimeout(100000L);
//        // max number of checkpoints running concurrently
//        env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);
//        // minimum pause between the end of one checkpoint and the start of the next
//        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);
//        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        // 2. Read the ods_base_db topic and parse each record into JSON (main stream).
        String topic = "ods_base_db";
        String groupId = "base_db_app_242105b_2";
        DataStreamSource<String> kafkaDS = env.addSource(MyKafkaUtil.getKafkaConsumer(topic, groupId));
        SingleOutputStreamOperator<JSONObject> jsonObjDS = kafkaDS.map(JSON::parseObject);

        // 3. Drop "delete" operations; downstream only cares about inserts/updates.
        SingleOutputStreamOperator<JSONObject> jsonObjFilterDS =
                jsonObjDS.filter(jsonObject -> !"delete".equals(jsonObject.getString("type")));

        // 4. Read the configuration table via CDC and turn it into a broadcast stream.
        //    (binlog must be enabled on MySQL; restart the MySQL service after enabling it)
        //    NOTE(review): credentials are hard-coded — move them to external configuration.
        DebeziumSourceFunction<String> mysqlSource = MySQLSource.<String>builder()
                .hostname("hadoop102")
                .port(3306)
                .username("root")
                .password("123456")
                .databaseList("gmall-flink-config")
                // initial(): snapshot existing config rows, then stream binlog changes
                .startupOptions(StartupOptions.initial())
                .deserializer(new CustomDebezium())
                .build();
        DataStreamSource<String> mysqlSourceDS = env.addSource(mysqlSource);
        MapStateDescriptor<String, TableProcess> mapStateDescriptor =
                new MapStateDescriptor<>("map-state", String.class, TableProcess.class);
        BroadcastStream<String> broadcastDS = mysqlSourceDS.broadcast(mapStateDescriptor);

        // 5. Connect the main stream with the broadcast stream.
        BroadcastConnectedStream<JSONObject, String> connectDS = jsonObjFilterDS.connect(broadcastDS);

        // 6. Side-output tag for dimension (HBase) records.
        OutputTag<JSONObject> hbaseTag = new OutputTag<JSONObject>("hbase_tag") {
        };

        // 7. Split the stream: Kafka records stay on the main stream, HBase records
        //    go to the side output.
        //    Broadcast side: 1. parse the string into a JavaBean  2. execute the DDL  3. store it in broadcast state
        //    Main side:      1. read broadcast state  2. filter columns  3. route to main/side output
        SingleOutputStreamOperator<JSONObject> processDS =
                connectDS.process(new TableProcessFunction(mapStateDescriptor, hbaseTag));

        // 8. Read the main stream and the side output, and write them to Kafka/HBase.
        DataStream<JSONObject> hbaseDS = processDS.getSideOutput(hbaseTag);
        hbaseDS.print("hbase");
        processDS.print("kafka");

        // upsert into GMALL2105_REALTIME.DIM_BASE_TRADEMARK (id,tm_name) values(?,?)
        hbaseDS.addSink(new DimFunction());
        processDS.addSink(MyKafkaUtil.getKafkaProducer(new KafkaSerializationSchema<JSONObject>() {
            @Override
            public ProducerRecord<byte[], byte[]> serialize(JSONObject jsonObject, @Nullable Long aLong) {
                // Topic name comes from the "sinkTable" field set by TableProcessFunction.
                // Encode explicitly as UTF-8: the no-arg getBytes() uses the platform
                // default charset and would produce environment-dependent bytes.
                return new ProducerRecord<>(jsonObject.getString("sinkTable"),
                        jsonObject.getString("after").getBytes(StandardCharsets.UTF_8));
            }
        }));

        // 9. Start the job.
        env.execute("BaseDbApp");

    }

}
