package com.atguigu.app.dwd;

import com.alibaba.fastjson.JSONObject;
import com.alibaba.ververica.cdc.connectors.mysql.MySQLSource;
import com.alibaba.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.alibaba.ververica.cdc.debezium.DebeziumSourceFunction;
import com.atguigu.app.function.FlinkSerializeDefine;
import com.atguigu.app.function.HbaseSinkDefine;
import com.atguigu.app.function.TableProcessFunctionDefine;
import com.atguigu.bean.TableProcess;
import com.atguigu.common.DWDStaticConstants;
import com.atguigu.utils.KafkaUtil;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.OutputTag;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;

import java.nio.charset.StandardCharsets;

/**
 * @Author: GaoFei
 * @Description: Data flow: mockDb -> MySQL -> FlinkCDC -> Kafka -> Flink -> Kafka/Phoenix
 * @Date: Created in 18:11
 * @Modified By:
 */
public class BaseDbApp {
    public static void main(String[] args) throws Exception {
        // Set up the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(1);
        /*
         * Required in production; disabled during development to avoid having to run HDFS:
         *   System.setProperty("HADOOP_USER_NAME", "atguigu");
         *   env.setStateBackend(new FsStateBackend(OdsStaticConstants.CHECK_POINT_URL));
         *   env.enableCheckpointing(5000L);                                              // checkpoint interval
         *   env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
         *   env.getCheckpointConfig().setCheckpointTimeout(10000L);                      // checkpoint timeout
         *   env.getCheckpointConfig().setMaxConcurrentCheckpoints(2);                    // at most 2 in flight
         *   env.getCheckpointConfig().setMinPauseBetweenCheckpoints(3000L);              // pause between checkpoints
         */
        // Main stream: consume the Kafka topic ods_base_db.
        DataStreamSource<String> dataStreamSource =
                env.addSource(KafkaUtil.getKafkaConsumer(DWDStaticConstants.DWD_KAFKA_TOPIC_ODS_BASE_DB, DWDStaticConstants.ODS_BASE_DB_GROUP_ID));
        // Parse each record into a JSONObject and drop DELETE operations.
        // NOTE(review): assumes the deserialized record carries the op type under the
        // "operation" key (set by FlinkSerializeDefine) — verify against that class.
        SingleOutputStreamOperator<JSONObject> operation = dataStreamSource
                .map(JSONObject::parseObject)
                .filter(f -> !"DELETE".equalsIgnoreCase(f.getString("operation")));
        // Config stream: use Flink CDC to watch the table_process configuration table.
        // NOTE(review): credentials are hardcoded — move to external configuration before production use.
        DebeziumSourceFunction<String> build = MySQLSource.<String>builder()
                .hostname("192.168.10.100")
                .port(3306)
                .username("root")
                .password("admin")
                .databaseList("gmall2021_realtime")
                .tableList("gmall2021_realtime.table_process")
                .deserializer(new FlinkSerializeDefine())
                // initial(): take a snapshot of existing rows, then stream the binlog.
                .startupOptions(StartupOptions.initial())
                .build();

        // Broadcast the config stream and connect it with the main stream so every
        // parallel task sees the routing rules (table -> sink) from table_process.
        DataStreamSource<String> confDataStream = env.addSource(build);
        MapStateDescriptor<String, TableProcess> mapStateDescriptor = new MapStateDescriptor<>("conf", String.class, TableProcess.class);
        BroadcastStream<String> broadcast = confDataStream.broadcast(mapStateDescriptor);
        BroadcastConnectedStream<JSONObject, String> confWithDbDs = operation.connect(broadcast);
        // Split the connected stream: Kafka-bound records stay on the main output,
        // HBase-bound records go to the side output.
        OutputTag<JSONObject> hbaseOutput = new OutputTag<JSONObject>("hbase") {};
        SingleOutputStreamOperator<JSONObject> kafkaDs =
                confWithDbDs.process(new TableProcessFunctionDefine(hbaseOutput, mapStateDescriptor));
        DataStream<JSONObject> hbaseDs = kafkaDs.getSideOutput(hbaseOutput);

        // Sinks: HBase-bound data goes to Phoenix, Kafka-bound data goes to a
        // per-record topic chosen from the "sinkTable" field.
        hbaseDs.addSink(new HbaseSinkDefine());
        kafkaDs.addSink(KafkaUtil.sendToDwdMessage(new KafkaSerializationSchema<JSONObject>() {
            @Override
            public ProducerRecord<byte[], byte[]> serialize(JSONObject jsonObject, @Nullable Long aLong) {
                // Route dynamically to the topic named by "sinkTable".
                // Encode explicitly as UTF-8: the no-arg getBytes() uses the platform
                // default charset and corrupts non-ASCII payloads on non-UTF-8 hosts.
                return new ProducerRecord<>(jsonObject.getString("sinkTable")
                        , jsonObject.getString("after").getBytes(StandardCharsets.UTF_8));
            }
        }));

        env.execute("BaseDbApp");
    }
}
