package com.atguigu.flink.edu.app.dwd.db;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.flink.edu.app.fun.BaseDbTableProcessFunction;
import com.atguigu.flink.edu.beans.TableProcess;
import com.atguigu.flink.edu.utils.MyKafkaUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.connectors.mysql.table.StartupOptions;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.restartstrategy.RestartStrategies;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.time.Time;
import org.apache.flink.api.common.typeinfo.Types;
import org.apache.flink.connector.kafka.sink.KafkaRecordSerializationSchema;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.BroadcastConnectedStream;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerRecord;

import java.nio.charset.StandardCharsets;
import java.util.Properties;

/**
 * DWD-layer dispatcher job for business-database (DB) change events.
 *
 * <p>Pipeline: consume the raw change-log topic ({@code topic_db}) from Kafka,
 * drop Maxwell bootstrap records, join the stream with a broadcast of the
 * {@code table_process} config table (read via MySQL CDC), and route each
 * surviving record to the Kafka topic named by its {@code sink_table} field.
 */
public class BaseDbApp {
    public static void main(String[] args) throws Exception {
        //TODO 1 : prepare the basic execution environment
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();
        env.setParallelism(4);
        //TODO 2 : checkpoint settings (disabled for local development; re-enable for production)
        //env.enableCheckpointing(5000, CheckpointingMode.EXACTLY_ONCE);
        //env.getCheckpointConfig().setMinPauseBetweenCheckpoints(2000);
        //env.getCheckpointConfig().setCheckpointTimeout(60*1000);
        //env.setRestartStrategy(RestartStrategies.failureRateRestart(3, Time.days(30),Time.seconds(3)));
        //env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);
        //env.setStateBackend(new HashMapStateBackend());
        //env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop102:8020/gmall/ck");
        //System.setProperty("HADOOP_USER_NAME","atguigu");
        //TODO 3 : read data from Kafka
        //3.1: declare the consumer topic and consumer group
        String topic = "topic_db";
        String groupId = "base_db_group";
        //3.2: build the Kafka consumer source
        KafkaSource<String> kafkaSource = MyKafkaUtil.getKafkaSource(topic, groupId);
        //TODO 4 : simple ETL and type conversion on the consumed data
        //wrap the consumed records into a stream
        DataStreamSource<String> kafkaStrDs = env.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), "kafka_source");
        //ETL: drop malformed JSON and Maxwell bootstrap records, convert to JSONObject
        SingleOutputStreamOperator<JSONObject> jsonObjDs = kafkaStrDs.process(new ProcessFunction<String, JSONObject>() {
            @Override
            public void processElement(String jsonStr, Context context, Collector<JSONObject> out) throws Exception {
                try {
                    JSONObject jsonObj = JSON.parseObject(jsonStr);
                    // Read "type" once; a missing type previously caused an NPE
                    // that was swallowed by the catch below. Null-check explicitly
                    // so malformed records are dropped without a stack trace.
                    String type = jsonObj.getString("type");
                    if (type != null
                            && !"bootstrap-start".equals(type)
                            && !"bootstrap-insert".equals(type)
                            && !"bootstrap-complete".equals(type)) {
                        out.collect(jsonObj);
                    }
                } catch (Exception e) {
                    // Best-effort ETL: skip unparseable records instead of failing the job.
                    // NOTE(review): consider a proper logger / side-output for dirty data.
                    e.printStackTrace();
                }
            }
        });
        //TODO 5 : read the config table via MySQL CDC
        Properties properties = new Properties();
        properties.setProperty("useSSL","false");
        // NOTE(review): credentials are hard-coded — move to external configuration.
        MySqlSource<String> sqlSource = MySqlSource.<String>builder()
                .hostname("hadoop001")
                .port(3306)
                .password("000000")
                .username("root")
                .databaseList("edu_config")
                .tableList("edu_config.table_process")
                .jdbcProperties(properties)
                .startupOptions(StartupOptions.initial())
                .deserializer(new JsonDebeziumDeserializationSchema())
                .build();
        DataStreamSource<String> sqlSourceDs = env.fromSource(sqlSource, WatermarkStrategy.noWatermarks(), "sqlSource");
        //TODO 6 : broadcast the config stream (keyed by source table name downstream)
        MapStateDescriptor<String, TableProcess> mapStateDescriptor = new MapStateDescriptor<String, TableProcess>(
                "mapstate",
                Types.STRING,
                Types.POJO(TableProcess.class)
        );
        BroadcastStream<String> broadCastDs = sqlSourceDs.broadcast(mapStateDescriptor);
        //TODO 7 : connect the main stream with the broadcast stream
        BroadcastConnectedStream<JSONObject, String> connectDs = jsonObjDs.connect(broadCastDs);
        //TODO 8 : process the connected streams (filter/route per config table)
        SingleOutputStreamOperator<JSONObject> realDs = connectDs.process(new BaseDbTableProcessFunction(mapStateDescriptor));
        realDs.print("===>");
        //TODO 9 : write records to their per-table Kafka topics
        realDs.sinkTo(MyKafkaUtil.getKafkaSinkBySchema("base_db",
                new KafkaRecordSerializationSchema<JSONObject>() {
                    @Override
                    public ProducerRecord<byte[], byte[]> serialize(JSONObject jsonObj, KafkaSinkContext context, Long timestamp) {
                        // "sink_table" names the destination topic and is stripped from the payload.
                        // Assumes BaseDbTableProcessFunction always sets it — TODO confirm.
                        String topic = jsonObj.getString("sink_table");
                        jsonObj.remove("sink_table");
                        // Encode explicitly as UTF-8: the no-arg getBytes() used the
                        // platform default charset, which is not portable pre-Java 18.
                        return new ProducerRecord<byte[], byte[]>(topic, jsonObj.toJSONString().getBytes(StandardCharsets.UTF_8));
                    }
                }));
        env.execute();
    }
}
