package com.atguigu.gmall.realtime.app.dim;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.alibaba.fastjson.JSONValidator;
import com.atguigu.gmall.realtime.bean.TableProcess;
import com.atguigu.gmall.realtime.func.SinkPhoenixFunc;
import com.atguigu.gmall.realtime.func.TableProcessFunc;
import com.atguigu.gmall.realtime.util.KafkaUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

/**
 * @author caodan
 * @version 1.0
 * @date 2022-09-09 10:30
 * DIM-layer data processing: routes dimension-table changes from Kafka (topic_db)
 * to Phoenix, driven by broadcast config read from MySQL via Flink CDC.
 */
public class DimSinkApp {
    public static void main(String[] args) throws Exception {

        // Create the streaming execution environment.
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        // Set job parallelism.
        env.setParallelism(4);
        // --- Checkpoint configuration ---
        // Enable checkpointing every 5s with exactly-once semantics.
        env.enableCheckpointing(5000L, CheckpointingMode.EXACTLY_ONCE);
        // Checkpoint timeout.
        env.getCheckpointConfig().setCheckpointTimeout(60000L);
        // Retain externalized checkpoints after the job is cancelled.
        env.getCheckpointConfig().enableExternalizedCheckpoints(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        // Minimum pause between two consecutive checkpoints.
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(2000L);
        // Restart strategy: at most 3 failures within 30 days, 3s delay between attempts.
        env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.days(30), Time.seconds(3)));
        // State backend: heap-based state, checkpoints persisted to HDFS.
        env.setStateBackend(new HashMapStateBackend());
//        env.getCheckpointConfig().setCheckpointStorage(new JobManagerCheckpointStorage());
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/gmall/ck");
        // Required so HDFS writes are attributed to the correct user.
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        // --- Main stream: business-table changelog from Kafka ---
        String topicName = "topic_db";
        String groupId = "DIM_SINK_GROUP";

        FlinkKafkaConsumer<String> flinkKafkaConsumer = KafkaUtil.getFlinkKafkaConsumer(topicName, groupId);
        DataStreamSource<String> kafkaDataStream = env.addSource(flinkKafkaConsumer);

        // Parse each record to JSON. Malformed records are mapped to null instead of
        // throwing, so one bad message cannot fail the whole job; they are dropped
        // by the filter below.
        SingleOutputStreamOperator<JSONObject> jsonObjectSingleOutputStreamOperator =
                kafkaDataStream.map((MapFunction<String, JSONObject>) value -> {
                    try {
                        return JSON.parseObject(value);
                    } catch (Exception e) {
                        // Best-effort ETL: skip records that are not valid JSON.
                        return null;
                    }
                });
        // ETL: keep only records with a valid "data" payload, excluding Maxwell
        // bootstrap markers (bootstrap-insert records are kept — they carry
        // historical dimension data).
        SingleOutputStreamOperator<JSONObject> filterJsonObj = jsonObjectSingleOutputStreamOperator
                .filter((FilterFunction<JSONObject>) jsonObject -> {
            if (jsonObject == null) {
                return false;
            }
            String data = jsonObject.getString("data");
            // Guard against a missing "data" field: JSONValidator.from(null) would NPE.
            if (data == null || !JSONValidator.from(data).validate()) {
                return false;
            }
            String type = jsonObject.getString("type");
            return !"bootstrap-start".equals(type) && !"bootstrap-complete".equals(type);
        });

        // --- Broadcast stream: dimension routing config from MySQL via Flink CDC ---
        // NOTE(review): credentials are hard-coded; move them to configuration/secrets
        // management before production use.
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("hadoop102")
                .port(3306)
                .databaseList("gmall_config")
                .tableList("gmall_config.table_process")
                .username("root")
                .password("000000")
                .deserializer(new JsonDebeziumDeserializationSchema())
                .startupOptions(StartupOptions.initial()).build();

        // Wrap the CDC source as a stream.
        DataStreamSource<String> mysqlDataStream = env.
                fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "MysqlSource");

        // Connect the main stream with the broadcast config stream.
        MapStateDescriptor<String, TableProcess> mapStateDescriptor =
                new MapStateDescriptor<>("map-state", String.class, TableProcess.class);
        BroadcastStream<String> broadcastStream = mysqlDataStream.broadcast(mapStateDescriptor);

        BroadcastConnectedStream<JSONObject, String> connectedStream = filterJsonObj.connect(broadcastStream);

        // Route each change record according to the broadcast table-process config.
        SingleOutputStreamOperator<JSONObject> streamOperator =
                connectedStream.process(new TableProcessFunc(mapStateDescriptor));

        // Write dimension records to Phoenix (HBase).
        streamOperator.addSink(new SinkPhoenixFunc());

        // Named job for easier identification in the Flink UI and savepoint tooling.
        env.execute("DimSinkApp");
    }
}
