package com.atguigu.edu.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.edu.realtime.bean.TableProcess;
import com.atguigu.edu.realtime.common.Constant;
import com.atguigu.edu.realtime.utils.JdbcUtil;
import com.ververica.cdc.connectors.mysql.source.MySqlSource;
import com.ververica.cdc.debezium.JsonDebeziumDeserializationSchema;
import lombok.extern.slf4j.Slf4j;
import org.apache.flink.api.common.eventtime.WatermarkStrategy;
import org.apache.flink.api.common.functions.FilterFunction;
import org.apache.flink.api.common.functions.MapFunction;
import org.apache.flink.api.common.serialization.DeserializationSchema;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.common.typeinfo.TypeHint;
import org.apache.flink.api.common.typeinfo.TypeInformation;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.configuration.Configuration;
import org.apache.flink.connector.kafka.source.KafkaSource;
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer;
import org.apache.flink.runtime.state.hashmap.HashMapStateBackend;
import org.apache.flink.streaming.api.CheckpointingMode;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.CheckpointConfig;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer;
import org.apache.flink.streaming.connectors.kafka.KafkaSerializationSchema;
import org.apache.flink.util.Collector;
import org.apache.kafka.clients.producer.ProducerRecord;

import javax.annotation.Nullable;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Properties;

/**todo 采用动态分流来实现
 * 这里写一半发现 ：
 *  评价事实表、
 *  做卷记录事实表、
 *  做卷答题记录事实表 写法比较单一，所以采用动态分流，练习这种写法
 *
 * mysql表对应：
 * review_info、test_exam、test_exam_question
 */
@Slf4j
@Slf4j
public class Dwd_DynamicSplitFlow {

    // NOTE(review): value says "TrafficLogAppVideoDetail" but this class is the dynamic-split job.
    // Looks like a copy-paste leftover; changing it would relocate the checkpoint dir and the Kafka
    // consumer group, so it is left as-is — confirm and rename deliberately (with a savepoint).
    public static final String ckAndGroupIdAndJobName = "Dwd_TrafficLogAppVideoDetail";
    public static final String source_topic = Constant.TOPIC_ODS_DB;
    // NOTE(review): 3006 — presumably the local web-UI port (and possibly a typo for a different
    // port); the plain "port" config key may be ignored by newer Flink versions. TODO confirm.
    public static final Integer port = 3006;
    // TODO: could later be taken from program arguments instead of being hard-coded.
    public static final Integer parallelism = 2;

    public static void main(String[] args) {
        /*
            1. Set the Hadoop user so checkpoint writes to HDFS are authorized.
         */
        System.setProperty("HADOOP_USER_NAME", "atguigu");

        /*
            2. Build the execution environment: parallelism, state backend, checkpointing.
         */
        Configuration configuration = new Configuration();
        configuration.setInteger("port", port);
        StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment(configuration);
        env.setParallelism(parallelism);
        // State backend: heap-based hashmap state.
        env.setStateBackend(new HashMapStateBackend());
        // Checkpointing: exactly-once every 3s, stored on HDFS, retained on cancellation.
        env.enableCheckpointing(3000);
        env.getCheckpointConfig().setCheckpointingMode(CheckpointingMode.EXACTLY_ONCE);
        env.getCheckpointConfig().setCheckpointStorage("hdfs://hadoop162:8020/edu/" + ckAndGroupIdAndJobName);
        env.getCheckpointConfig().setCheckpointTimeout(60 * 1000);
        env.getCheckpointConfig().setMinPauseBetweenCheckpoints(500);
        env.getCheckpointConfig().setExternalizedCheckpointCleanup(CheckpointConfig.ExternalizedCheckpointCleanup.RETAIN_ON_CANCELLATION);

        /*
            3. Read ods_db as a DataStream (not via the Table API).
               read_committed so that aborted transactional writes upstream are not consumed.
         */
        DataStreamSource<String> topicDbDs = env.fromSource(
                KafkaSource.<String>builder()
                        .setBootstrapServers(Constant.KAFKA_BROKERS)
                        .setTopics(source_topic)
                        .setGroupId(ckAndGroupIdAndJobName)
                        .setStartingOffsets(OffsetsInitializer.latest())
                        // Custom deserializer: raw UTF-8 bytes -> String, null-safe.
                        .setValueOnlyDeserializer(new DeserializationSchema<String>() {
                            @Override
                            public String deserialize(byte[] message) throws IOException {
                                if (message != null) {
                                    return new String(message, StandardCharsets.UTF_8);
                                }
                                return null;
                            }

                            @Override
                            public boolean isEndOfStream(String s) {
                                // Unbounded stream: never signal end-of-stream.
                                return false;
                            }

                            @Override
                            public TypeInformation<String> getProducedType() {
                                return TypeInformation.of(new TypeHint<String>() {});
                            }
                        })
                        .setProperty("isolation.level", "read_committed")
                        .build()

                , WatermarkStrategy.noWatermarks()

                , "kafka-source"
        );


        /*
            4. Cleanse the data: keep only well-formed JSON change records from the `edu`
               database with a non-empty `data` payload.  [--- data stream ---]
         */
        SingleOutputStreamOperator<JSONObject> dataStream = topicDbDs
                .filter(new FilterFunction<String>() {
                    @Override
                    public boolean filter(String value) throws Exception {
                        // Keep the record only if:
                        //  1. value parses as JSON, and
                        //  2. it comes from the expected database with a usable type/data.
                        try {
                            JSONObject obj = JSON.parseObject(value);

                            String type = obj.getString("type");

                            String data = obj.getString("data");

                            // data.length() > 2 rejects empty payloads such as "{}".
                            return "edu".equals(obj.getString("database"))
                                    && ("insert".equals(type) || "update".equals(type) || "bootstrap-insert".equals(type))
                                    && data != null
                                    && data.length() > 2;
                        } catch (Exception e) {
                            log.warn("数据格式有误,不是正确的 json 数据: " + value);
                            return false;
                        }
                    }
                })
                // Normalize "bootstrap-insert" to "insert" so downstream sees one type.
                .map(json -> JSON.parseObject(json.replaceAll("bootstrap-", "")));

        /*
            5. Read the routing configuration table from MySQL via CDC. [--- config stream ---]
               The interesting part is the Debezium-style JSON format of the change records.
         */
        Properties properties = new Properties();
        properties.setProperty("useSSL", "false");
        DataStreamSource<String> tpFromMysqlDS = env.fromSource(
                MySqlSource.<String>builder()
                        .hostname("hadoop162")
                        .port(3306)
                        .jdbcProperties(properties)
                        .databaseList("edu_config")
                        .tableList("edu_config.table_process")
                        .username("root")
                        .password("aaaaaa")
                        .deserializer(new JsonDebeziumDeserializationSchema())
                        .build(),

                WatermarkStrategy.noWatermarks(),

                "mysql-source");
        /*
            Debezium `op` values in the config records:
            r: snapshot read at startup      before: null, after: present
            u: update of a field             before: present, after: present
            c: insert                        before: null, after: present
            d: delete                        before: present, after: null

            If the primary key itself is changed, CDC emits d followed by c.
        */
        SingleOutputStreamOperator<TableProcess> tpStream = tpFromMysqlDS.map(new MapFunction<String, TableProcess>() {
            @Override
            public TableProcess map(String json) throws Exception {
                JSONObject obj = JSON.parseObject(json);
                String op = obj.getString("op");
                TableProcess tp;

                // For deletes only `before` is populated; everything else carries `after`.
                if (!"d".equals(op)) {
                    tp = obj.getObject("after", TableProcess.class);
                } else {
                    tp = obj.getObject("before", TableProcess.class);
                }
                tp.setOp(op);
                return tp;
            }
        }).filter(tp -> "dwd".equals(tp.getSinkType())); // this job only routes DWD-layer config

        /*
            6. Connect the config stream (broadcast) with the data stream.

               Race condition: data may arrive before the broadcast config does (Kafka backlog,
               network).  Remedy: preload the config table via plain JDBC in open() into a local
               HashMap (Flink state cannot be used inside open()).

               Lookup order must be state first, then the map: the map only reflects the config
               at job start, and a later config change would otherwise be shadowed.  On a CDC
               delete, the entry must be removed from BOTH the broadcast state and the map,
               or stale config could resurrect.
         */
        MapStateDescriptor<String, TableProcess> tpStateDesc
                = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        // Broadcast the config stream; the descriptor names the state being broadcast.
        BroadcastStream<TableProcess> tpBcStream = tpStream.broadcast(tpStateDesc);

        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectProcessDS = dataStream
                .connect(tpBcStream)
                .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {

                    // JDBC-preloaded config, keyed like the broadcast state; fallback only.
                    private HashMap<String, TableProcess> tpMap;
                    private Connection conn;

                    @Override
                    public void open(Configuration parameters) throws Exception {
                        // Preload the whole config table so early data records can be routed
                        // before the first broadcast element arrives.
                        conn = JdbcUtil.getMysqlConnection();
                        String sql = "select * from table_process";
                        List<TableProcess> tps = JdbcUtil.queryList(conn, sql, null, TableProcess.class, true);

                        tpMap = new HashMap<>();
                        for (TableProcess tp : tps) {
                            String key = tp.getSourceTable() + ":" + tp.getSourceType()
                                    + (tp.getSinkExtend() == null ? "" : tp.getSinkExtend());
                            tpMap.put(key, tp);
                            log.warn("初始化配置表: " + tp);
                        }
                    }

                    @Override
                    public void close() throws Exception {
                        JdbcUtil.closeConnection(conn);
                    }

                    @Override
                    public void processElement(JSONObject obj,
                                               ReadOnlyContext ctx,
                                               Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        // Route a data record using the broadcast config state.
                        ReadOnlyBroadcastState<String, TableProcess> state = ctx.getBroadcastState(tpStateDesc);
                        String table = obj.getString("table");
                        String type = obj.getString("type");
                        // NOTE(review): this lookup key omits sinkExtend, while the keys written
                        // in open()/processBroadcastElement() append sinkExtend when non-null.
                        // Any config row with a non-null sinkExtend can therefore never match
                        // here.  Presumably sinkExtend is null for all dwd rows — TODO confirm,
                        // otherwise unify the key-building in one shared helper.
                        String key = table + ":" + type;

                        /*
                            tpMap was initialized at job start.  Read the broadcast state first;
                            fall back to the map only when the state has no entry yet.
                         */
                        TableProcess tp = state.get(key);
                        if (tp == null) {
                            tp = tpMap.get(key);
                            if (tp != null) {
                                log.warn("从 map 中读取配置信息...");
                            }
                        }

                        if (tp != null) {
                            JSONObject data = obj.getJSONObject("data");
                            data.put("op_type", obj.getString("type")); // needed downstream
                            out.collect(Tuple2.of(data, tp));
                        }

                    }

                    // Handle a broadcast (config) element: update the broadcast state.
                    @Override
                    public void processBroadcastElement(TableProcess tp, Context ctx, Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                        BroadcastState<String, TableProcess> broadcastState = ctx.getBroadcastState(tpStateDesc);
                        String key = tp.getSourceTable() + ":" + tp.getSourceType() + (tp.getSinkExtend() == null ? "" : tp.getSinkExtend());
                        // Different ops require different state actions.
                        if ("d".equals(tp.getOp())) { // a config row was deleted
                            broadcastState.remove(key);
                            // The preloaded fallback entry must go too, or stale config leaks back.
                            tpMap.remove(key);
                        } else {
                            broadcastState.put(key, tp);
                        }

                    }
                });

        /*
            7. Drop columns not listed in the config's sinkColumns (op_type is always kept).
         */
        connectProcessDS = connectProcessDS.map(new MapFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
            @Override
            public Tuple2<JSONObject, TableProcess> map(Tuple2<JSONObject, TableProcess> t) throws Exception {
                // JSONObject is essentially a Map; prune keys absent from sinkColumns.
                JSONObject data = t.f0;
                List<String> columns = Arrays.asList(t.f1.getSinkColumns().split(","));
                data.keySet().removeIf(key -> !columns.contains(key) && !"op_type".equals(key));

                return t;
            }
        });

        /*
            8. Write to Kafka.  The topic is taken per-record from the config's sinkTable, so
               the "default" topic passed to the constructor is never actually used.
               transaction.timeout.ms must stay below the broker's max (15 min here) for
               EXACTLY_ONCE transactional writes.
         */
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", Constant.KAFKA_BROKERS);
        props.setProperty("transaction.timeout.ms", 15 * 60 * 1000 + "");
        connectProcessDS.addSink(
                new FlinkKafkaProducer<Tuple2<JSONObject, TableProcess>>(
                        "default",
                        new KafkaSerializationSchema<Tuple2<JSONObject, TableProcess>>() {
                            @Override
                            public ProducerRecord<byte[], byte[]> serialize(Tuple2<JSONObject, TableProcess> element, @Nullable Long timestamp) {
                                // The target topic is the TableProcess row's sinkTable field.
                                String sinkTable_topic = element.f1.getSinkTable();
                                // Per-record stdout printing replaced with debug-level logging.
                                log.debug("sink topic: {}", sinkTable_topic);
                                return new ProducerRecord<>(sinkTable_topic, element.f0.toJSONString().getBytes(StandardCharsets.UTF_8));
                            }
                        },
                        props,
                        FlinkKafkaProducer.Semantic.EXACTLY_ONCE
                )
        );

        /*
            9. Launch the job, named after the checkpoint/group-id constant.
         */
        try {
            env.execute(ckAndGroupIdAndJobName);
        } catch (Exception e) {
            // Log instead of printStackTrace so the failure lands in the job logs.
            log.error("Flink job " + ckAndGroupIdAndJobName + " failed", e);
        }


    }
}
