package com.mjf.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.ververica.cdc.connectors.mysql.MySQLSource;
import com.alibaba.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction;
import com.mjf.app.function.CustomerDeserialization;
import com.mjf.app.function.DimSinkFunction;
import com.mjf.app.function.TableProcessFunction;
import com.mjf.bean.TableProcess;
import com.mjf.utils.MyKafkaUtil;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;

import java.nio.charset.StandardCharsets;

/**
 * 将业务数据中的事实数据写入 Kafka，维度数据写入 HBase
 *
 * 数据流向：web/app -> nginx -> springboot -> MySQL -> flinkApp -> kafka(ods) -> flinkApp -> kafka(dwd)/HBase(dim)
 * 程序：mockDb(包含web/nginx/springboot) -> MySQL -> FlinkCDC(包含flinkApp/kafka(ods)) -> BaseDbApp(包含flinkApp/kafka(dwd)/HBase(dim))
 * 环境：dfs zookeeper kafka hbase phoenix(bin/sqlline.py)
 *
 * 备注：通过 Mysql(gmall-realtime.table_process) 配置 gmall-flink 库中的表属于事实表(Kafka-dwd)还是维度表(HBase-dim)
 *
 * 备注：为了防止主流数据比广播流数据先到导致主流数据丢失：
 *      1.可以通过先启动 {@link BaseDbApp}，再启动 {@link com.mjf.app.ods.FlinkCDC}。来确保广播流数据比主流数据先到
 *      2.可以在 {@link TableProcessFunction#open(Configuration)} 方法中通过JDBC先将配置信息加载加载到Map中。
 *      然后主流在状态中没有获取到配置信息时，还需要判断Map中有没有配置信息。
 */
public class BaseDbApp {
    public static void main(String[] args) throws Exception {

        // 1.获取执行环境
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);

/*
        // 测试时关闭
        // 设置状态后端
        env.setStateBackend(new FsStateBackend("hdfs://hadoop102:9000/gmall-flink/checkpoint"));
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointTimeout(10000L);
        env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);

        // 设置重启策略
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 10));
*/

        // 2.消费 Kafka(ods_base_db) 主题数据创建流
        String topic = "ods_base_db";
        String groupId = "BaseDbApp";
        DataStreamSource<String> kafkaDS = env.addSource(MyKafkaUtil.getKafkaConsumer(topic, groupId));

        // 3.将每行数据转换为 Json 对象并过滤(delete) 主流
        SingleOutputStreamOperator<JSONObject> jsonObjDS = kafkaDS
                .map(JSON::parseObject)
                .filter(new FilterFunction<JSONObject>() {
                    @Override
                    public boolean filter(JSONObject value) throws Exception {
                        String type = value.getString("type");
                        return !"delete".equals(type);
                    }
                });

        // 4.使用 FlinkCDC 消费配置表(用于区分数据库中的表属于事实表还是维度表)并处理成 广播流
        DebeziumSourceFunction<String> sourceFunction = MySQLSource.<String>builder()
                .hostname("hadoop103")
                .port(3306)
                .username("root")
                .password("123456")
                .databaseList("gmall-realtime")
                .tableList("gmall-realtime.table_process")
                .startupOptions(StartupOptions.initial())
                .deserializer(new CustomerDeserialization())
                .build();
        DataStreamSource<String> tableProcessDS = env.addSource(sourceFunction);
        MapStateDescriptor<String, TableProcess> mapStateDescriptor = new MapStateDescriptor<>("map-state", String.class, TableProcess.class);
        BroadcastStream<String> broadcastDS = tableProcessDS.broadcast(mapStateDescriptor);

        // 5.连接主流和广播流
        BroadcastConnectedStream<JSONObject, String> connectDS = jsonObjDS.connect(broadcastDS);

        // 6.分流 处理 广播流数据，主流数据（跟据广播流数据进行处理）
        OutputTag<JSONObject> hbaseTag = new OutputTag<JSONObject>("hbase-tag") {};
        SingleOutputStreamOperator<JSONObject> kafka = connectDS.process(new TableProcessFunction(hbaseTag, mapStateDescriptor));

        // 7.提取 HBase 流数据
        DataStream<JSONObject> hbase = kafka.getSideOutput(hbaseTag);

        // 8.将 Kafka 数据写入 Kafka 主题，将 HBase 数据写入 Phoenix 表
        kafka.print("Kafka=====>");
        hbase.print("HBase=====>");

        kafka.addSink(MyKafkaUtil.getKafkaProducer(new KafkaSerializationSchema<JSONObject>() {
            @Override
            public ProducerRecord<byte[], byte[]> serialize(JSONObject element, @Nullable Long timestamp) {
                return new ProducerRecord<byte[], byte[]>(
                        element.getString("sinkTable"),
                        element.getString("after").getBytes()
                );
            }
        }));

        hbase.addSink(new DimSinkFunction());

        // 9.启动任务
        env.execute(BaseDbApp.class.getName());

    }
}
