package com.atguigu.gmall.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.ververica.cdc.connectors.mysql.MySQLSource;
import com.alibaba.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.atguigu.gmall.realtime.app.func.DimSink;
import com.atguigu.gmall.realtime.app.func.MyDeserializationSchemaFunction;
import com.atguigu.gmall.realtime.app.func.TableProcessFunction;
import com.atguigu.gmall.realtime.bean.TableProcess;
import com.atguigu.gmall.realtime.util.MyKafkaUtil;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.OutputTag;
import org.apache.flink.util.StringUtils;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;

import java.nio.charset.StandardCharsets;

/**
 * @author: xu
 * @desc: DWD layer for business (DB) data.
 * Data flow: web/app -> nginx -> SpringBoot -> MySQL -> FlinkApp -> Kafka(ods) -> FlinkApp -> Kafka(dwd)/Phoenix(dim)
 * Programs : mockDb -> MySQL -> FlinkCDC -> Kafka(ZK) -> BaseDBApp -> Kafka/Phoenix(hbase,zk,hdfs)
 *
 * Reads change records from the Kafka ODS topic, dynamically routes each record using a
 * broadcast configuration table (MySQL `table_process`, captured via Flink CDC):
 * fact-table rows go back to Kafka (DWD topics), dimension-table rows go to Phoenix/HBase.
 */
public class BaseDBApp {
    public static void main(String[] args) throws Exception {
        // TODO 1. Prepare the environment
        // 1.1 Create the stream execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // 1.2 Set the parallelism
        env.setParallelism(1);
        // 1.3 Enable checkpointing and configure related parameters
        //     (disabled for local development; re-enable for production)
        // env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        // env.getCheckpointConfig().setCheckpointTimeout(60000);
        // env.setRestartStrategy(RestartStrategies.fixedDelayRestart(3,3000L));
        // env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // env.setStateBackend(new FsStateBackend("hdfs://node1:8020/gmall/checkpoint/BaseDBApp"));
        // System.setProperty("HADOOP_USER_NAME", "root");

        // TODO 2. Read data from the Kafka ODS layer
        String topic = "ods_base_db_m";
        String groupId = "base_db_app_group";
        // 2.1 Obtain the Kafka source through the utility class
        DataStreamSource<String> kafkaOdsStream = env.addSource(MyKafkaUtil.getKafkaSource(topic, groupId));

        // TODO 3. Convert each record from String to JSON and apply ETL filtering:
        //         drop records whose "table" is blank, whose "data" object is missing,
        //         or whose "data" string is too short (<= 3 chars) to be a real payload.
        //         NOTE(review): JSON.parseObject will fail the job on malformed input —
        //         acceptable here since upstream is trusted CDC output.
        SingleOutputStreamOperator<JSONObject> filterJsonStream = kafkaOdsStream
                .map(JSON::parseObject)
                .filter(jsonObj ->
                        !StringUtils.isNullOrWhitespaceOnly(jsonObj.getString("table"))
                                && jsonObj.getJSONObject("data") != null
                                && jsonObj.getString("data").length() > 3
                );
        // filterJsonStream.print("json>>>>");

        // TODO 4. Read the MySQL configuration table via Flink CDC to form a broadcast
        //         stream, then connect the main stream with the broadcast stream
        DataStreamSource<String> mysqlStream = env.addSource(
                MySQLSource.<String>builder()
                        .hostname("node1")
                        .port(3306)
                        .username("root")
                        .password("123456")
                        .databaseList("gmall_realtime")
                        .tableList("gmall_realtime.table_process")
                        .deserializer(new MyDeserializationSchemaFunction())
                        .startupOptions(StartupOptions.initial())
                        .build()
        );

        // Broadcast the configuration stream so every parallel instance processing
        // business data has access to the routing configuration.
        MapStateDescriptor<String, TableProcess> mapStateDescriptor = new MapStateDescriptor<>("table-process", String.class, TableProcess.class);
        BroadcastStream<String> broadcastStream = mysqlStream.broadcast(mapStateDescriptor);
        // Call connect on the non-broadcast stream to join business and config streams.
        BroadcastConnectedStream<JSONObject, String> connectedStream = filterJsonStream.connect(broadcastStream);

        // TODO 5. Dynamic routing: fact-table rows stay on the main stream and go to the
        //         Kafka DWD layer; dimension-table rows go to HBase via a side output.
        // 5.1 Define the side-output tag for records destined for HBase
        OutputTag<JSONObject> hbaseTag = new OutputTag<>(TableProcess.SINK_TYPE_HBASE, TypeInformation.of(JSONObject.class));
        // 5.2 Main stream: records to be written to Kafka
        SingleOutputStreamOperator<JSONObject> kafkaStream = connectedStream.process(
                new TableProcessFunction(hbaseTag, mapStateDescriptor)
        );
        // 5.3 Side output: records to be written to HBase
        DataStream<JSONObject> hbaseStream = kafkaStream.getSideOutput(hbaseTag);
        kafkaStream.print("Kafka事实>>>");
        hbaseStream.print("HBase维度>>>");

        // TODO 6. Save dimension data into the corresponding Phoenix dimension tables
        hbaseStream.addSink(new DimSink());

        // TODO 7. Write fact data back to the Kafka DWD layer
        kafkaStream.addSink(MyKafkaUtil.getKafkaSinkBySchema(
                new KafkaSerializationSchema<JSONObject>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(JSONObject jsonObj, @Nullable Long timestamp) {
                        // The target topic was attached by TableProcessFunction under "sink_table".
                        String sinkTopic = jsonObj.getString("sink_table");
                        String dataJsonObj = jsonObj.getString("data");
                        // Encode explicitly as UTF-8: the no-arg getBytes() uses the
                        // platform default charset, which is not portable across JVMs.
                        return new ProducerRecord<>(sinkTopic, dataJsonObj.getBytes(StandardCharsets.UTF_8));
                    }
                }
        ));

        env.execute(BaseDBApp.class.getSimpleName());
    }
}
