package com.atguigu.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.ververica.cdc.connectors.mysql.MySQLSource;
import com.alibaba.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.atguigu.gmall.realtime.bean.TableProcess;
import com.atguigu.gmall.realtime.function.DimSinkFunction;
import com.atguigu.gmall.realtime.function.DynamicSplitStreamBroadcastProcessor;
import com.atguigu.gmall.realtime.serializer.FlinkCDCMaxwellDeserializer;
import com.atguigu.gmall.realtime.utils.KafkaUtils;
import org.apache.commons.collections.MapUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.runtime.state.filesystem.FsStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;

import java.nio.charset.StandardCharsets;

/**
 * Reads ODS-layer business data (Maxwell-formatted change records) from Kafka,
 * dynamically splits it into dimension and fact streams using a broadcast
 * configuration stream sourced from MySQL via Flink CDC, then writes dimension
 * records to HBase and fact records back to Kafka DWD-layer topics.
 *
 * @author lvbingbing
 * @date 2022-03-18 10:04
 */
public class BaseDBApp {
    public static void main(String[] args) throws Exception {
        // 1. Environment setup
        // 1.1 Stream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // 1.2 Parallelism
        env.setParallelism(2);
        // 2. Checkpoint configuration
        // 2.1 Enable exactly-once checkpointing every 6s
        env.enableCheckpointing(6000L, CheckpointingMode.EXACTLY_ONCE);
        // 2.2 Checkpoint timeout
        CheckpointConfig checkpointConfig = env.getCheckpointConfig();
        checkpointConfig.setCheckpointTimeout(10000L);
        // 2.3 Retain externalized checkpoints when the job is cancelled
        checkpointConfig.enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // 2.4 Restart strategy: 3 attempts, 5s delay
        env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3, 5000L));
        // 2.5 State backend on HDFS.
        //     Set the HDFS user BEFORE configuring anything HDFS-related so the
        //     filesystem client picks up the correct identity.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        env.setStateBackend(new FsStateBackend("hdfs://hadoop102:8020/ck/baseDb"));
        // 3. Read ODS business data from Kafka
        DataStreamSource<String> kafkaDs = env.addSource(KafkaUtils.getKafkaSource("ods_base_db_m", "base_db_app_group"));
        // 4. Convert each record from raw JSON string to JSONObject
        SingleOutputStreamOperator<JSONObject> jsonObjDs = kafkaDs.map(JSON::parseObject);
        // 5. Simple ETL: drop records with a missing table name or empty "data" payload
        SingleOutputStreamOperator<JSONObject> filterDs = jsonObjDs.filter(jsonObject -> {
            String table = jsonObject.getString("table");
            JSONObject dataJsonObj = jsonObject.getJSONObject("data");
            return StringUtils.isNotEmpty(table) && MapUtils.isNotEmpty(dataJsonObj);
        });
        // 6. Read the routing-configuration table with Flink CDC
        // 6.1 Build the CDC source for the config table.
        //     SECURITY NOTE(review): credentials are hard-coded here; move them
        //     to external configuration / a secrets store.
        SourceFunction<String> configSourceFunction = MySQLSource.<String>builder()
                .hostname("hadoop102")
                .port(3306)
                .username("root")
                .password("139559")
                .databaseList("gmall2021_realtime")
                // Optional: without tableList, every table in the listed databases is captured.
                // Note: entries must use the "db.table" form.
                .tableList("gmall2021_realtime.table_process")
                .startupOptions(StartupOptions.initial())
                .deserializer(new FlinkCDCMaxwellDeserializer())
                .build();
        // 6.2 Wrap the CDC source into a stream
        DataStreamSource<String> configDataStream = env.addSource(configSourceFunction);
        // 6.3 Broadcast the config stream so every parallel business-data subtask can use it
        MapStateDescriptor<String, TableProcess> mapStateDescriptor = new MapStateDescriptor<>("table_process", String.class, TableProcess.class);
        BroadcastStream<String> broadcastDataStream = configDataStream.broadcast(mapStateDescriptor);
        // 6.4 Connect the (non-broadcast) business stream with the broadcast config stream
        BroadcastConnectedStream<JSONObject, String> connectedStream = filterDs.connect(broadcastDataStream);
        // 7. Dynamic split: dimension data to a side output, fact data to the main stream
        // 7.1 Side-output tag for dimension records
        OutputTag<JSONObject> dimTag = new OutputTag<JSONObject>("dimTag") {
            private static final long serialVersionUID = 8088341508611753648L;
        };
        // 7.2 Process the connected stream to perform the split
        DynamicSplitStreamBroadcastProcessor broadcastProcessFunction = new DynamicSplitStreamBroadcastProcessor(dimTag, mapStateDescriptor);
        SingleOutputStreamOperator<JSONObject> processStreamDs = connectedStream.process(broadcastProcessFunction);
        // 7.3 Dimension side output (printed for debugging)
        DataStream<JSONObject> dimStream = processStreamDs.getSideOutput(dimTag);
        dimStream.print("维度数据");
        // 7.4 Fact main stream (printed for debugging)
        processStreamDs.print("事实数据");
        // 8. Sink dimension records to HBase
        dimStream.addSink(new DimSinkFunction());
        // 9. Sink fact records back to Kafka DWD-layer topics (exactly-once producer)
        processStreamDs.addSink(KafkaUtils.getKafkaSinkExactlyOnce(new KafkaSerializationSchema<JSONObject>() {
            @Override
            public ProducerRecord<byte[], byte[]> serialize(JSONObject jsonObject, @Nullable Long timestamp) {
                // Target topic is decided per-record by the dynamic-split processor
                String topic = jsonObject.getString("sink_table");
                // Payload: the "data" sub-object only.
                // Use an explicit UTF-8 charset — the no-arg getBytes() depends on the
                // platform default and can corrupt multi-byte characters.
                JSONObject dataJsonObject = jsonObject.getJSONObject("data");
                return new ProducerRecord<>(topic, dataJsonObject.toJSONString().getBytes(StandardCharsets.UTF_8));
            }
        }));
        // 10. Trigger job execution
        env.execute();
    }
}
