package com.zy.gmall.realtime.app.dwd.db;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import com.zy.gmall.realtime.app.func.MyBaseAppConnect;
import com.zy.gmall.realtime.bean.BaseDbTableProcess;
import com.zy.gmall.realtime.util.KafkaUtil;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.sink.KafkaSink;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;


/**
 * DWD-layer dispatcher job: consumes the unified business-DB change stream from the
 * {@code topic_db} Kafka topic, joins it (via broadcast state) with routing config read
 * from MySQL table {@code gmall_config.table_process_dwd} through Flink CDC, and writes
 * each matched record to the Kafka topic named by its {@code sinkTable} field.
 */
public class BaseDbApp {
    public static void main(String[] args) {
        // 1. Basic environment setup
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        // 2. Checkpoint settings: enable / timeout / interval / state backend /
        //    externalized-checkpoint retention / restart strategy / HDFS user.
        //    (Left disabled here; uncomment for production.)
//        env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);// enable
//        env.getCheckpointConfig().setCheckpointTimeout(100000L);// timeout
//        env.setRestartStrategy(RestartStrategies.failureRateRestart(3,Time.days(30),Time.seconds(10)));// restart
//        env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.NO_EXTERNALIZED_CHECKPOINTS);// retention

        // 3. Read the change-log stream from the Kafka topic
        String topic = "topic_db";
        String groupId = "base_db_app";
        KafkaSource<String> kafkaSource = KafkaUtil.getKafkaSource(topic, groupId);
        DataStreamSource<String> kafkaDs = env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka_source");// wrap as a stream
        //kafkaDs.print();

        // 4. Convert each record to JSON and perform ETL (drop dirty data).
        SingleOutputStreamOperator<JSONObject> jsonObjDs = kafkaDs.process(new ProcessFunction<String, JSONObject>() {
            @Override
            public void processElement(String s, ProcessFunction<String, JSONObject>.Context context, Collector<JSONObject> collector) throws Exception {
                try {
                    JSONObject jsonObj = JSON.parseObject(s);
                    // Filter out Maxwell bootstrap (historical snapshot) records.
                    // Maxwell emits type values "bootstrap-start", "bootstrap-insert",
                    // "bootstrap-complete" — so prefix matching is required; an exact
                    // equals("bootstrap-") would never match and nothing would be filtered.
                    String type = jsonObj.getString("type");
                    if (type == null || !type.startsWith("bootstrap-")) {
                        collector.collect(jsonObj);
                    }
                } catch (Exception e) {
                    // Malformed (non-JSON) records are deliberately dropped; log and continue.
                    e.printStackTrace();
                }
            }
        });
        //jsonObjDs.print();

        // 5. Use Flink CDC to read the routing-config table from MySQL.
        MySqlSource<String> mySqlSource = MySqlSource.<String>builder()
                .hostname("hadoop102")
                .port(3306)
                .databaseList("gmall_config") // set captured database
                .tableList("gmall_config.table_process_dwd") // set captured table
                .username("root")
                .password("000000")
                .deserializer(new JsonDebeziumDeserializationSchema()) // converts SourceRecord to JSON String
                .build();
        DataStreamSource<String> flinkCDCDs = env.fromSource(mySqlSource, WatermarkStrategy.noWatermarks(), "flinkCDC");
        //flinkCDCDs.print();

        // 6. Broadcast the config stream so every parallel main-stream task sees it.
        MapStateDescriptor<String, BaseDbTableProcess> mapStateDescriptor = new MapStateDescriptor<>("broadcast", String.class, BaseDbTableProcess.class);
        BroadcastStream<String> broadcastDs = flinkCDCDs.broadcast(mapStateDescriptor);
        // 7. Connect the main stream with the broadcast config stream.
        BroadcastConnectedStream<JSONObject, String> connectDs = jsonObjDs.connect(broadcastDs);
        // 8. Route each business record according to the broadcast config.
        SingleOutputStreamOperator<JSONObject> process = connectDs.process(
                new MyBaseAppConnect(mapStateDescriptor)
        );
        //process.print();
        // 9. Write the dynamically-routed records to different Kafka topics.
        //    The generic sink from KafkaUtil can only target a single fixed topic, so a
        //    custom serialization schema picks the topic per record from "sinkTable".
        KafkaSink<JSONObject> kafkaSinkBySchema = KafkaUtil.getKafkaSinkBySchema(
                new KafkaRecordSerializationSchema<JSONObject>() {
                    @Nullable
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(JSONObject jsonObject, KafkaSinkContext kafkaSinkContext, Long aLong) {
                        // The destination topic is carried inside the record itself,
                        // so serialization must be done manually here.
                        String sinkTable = jsonObject.getString("sinkTable");
                        // Strip the routing field so it does not pollute the payload.
                        jsonObject.remove("sinkTable");
                        return new ProducerRecord<byte[], byte[]>(sinkTable, jsonObject.toJSONString().getBytes());
                    }
                }
        );

        process.sinkTo(
                kafkaSinkBySchema
        );

        // Don't forget to actually submit the job.
        try {
            env.execute();
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

}









