package app.dwd;

import bean.TableProcess;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import common.Constant;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.BroadcastStream;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.datastream.DataStreamSource;
import org.apache.flink.streaming.api.datastream.SingleOutputStreamOperator;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;
import util.GmallSinkUtil;

import java.io.Serializable;
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;

/**
 * DWD-layer job that consumes the ODS database change-log topic, cleans it,
 * and dynamically routes each record using a MySQL-hosted config table
 * ({@code table_process}, read via Flink CDC and broadcast to the data stream):
 * fact rows go to Kafka (main output), dimension rows go to HBase (side output).
 */
public class DwdDBAppFollow extends BaseAppFollow implements Serializable {

    // Side-output tag that routes dimension rows to the HBase sink.
    // Declared as an anonymous subclass so the generic type survives erasure;
    // final because it is captured by a serialized process function.
    private final OutputTag<Tuple2<JSONObject, TableProcess>> hbaseTag = new OutputTag<Tuple2<JSONObject, TableProcess>>("hbaseTag") {
    };

    public static void main(String[] args) {
        // port 3002, parallelism 1, job name, source topic, consumer group
        new DwdDBAppFollow().init(3002, 1, "DwdDBAppFollow", Constant.TOPIC_ODS_DB, "DwdDBAppFollow");
    }

    /**
     * Pipeline: ETL-clean the raw stream, read the routing config via Flink CDC,
     * broadcast the config to split the stream dynamically, then sink fact rows
     * to Kafka and dimension rows to HBase.
     */
    @Override
    public void run(StreamExecutionEnvironment env, DataStreamSource<String> dataStreamSource) {
        // ETL: keep only well-formed insert/update events with a real payload
        SingleOutputStreamOperator<JSONObject> etledStream = etlDataStream(dataStreamSource);

        // Read the table_process routing config from MySQL via Flink CDC
        SingleOutputStreamOperator<TableProcess> tableProcessStream = readTableProcess(env);

        // Broadcast the config so every record can look up its sink dynamically:
        // main output -> Kafka (facts), side output -> HBase (dimensions)
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> toKafkaStream = dynamicSplitStream(tableProcessStream, etledStream);
        DataStream<Tuple2<JSONObject, TableProcess>> toHbaseStream = toKafkaStream.getSideOutput(hbaseTag);

        sendToKafka(toKafkaStream);
        sendToHbase(toHbaseStream);
    }

    /** Keys dimension rows by target table so one sink subtask handles one HBase table. */
    private void sendToHbase(DataStream<Tuple2<JSONObject, TableProcess>> toHbaseStream) {
        toHbaseStream
            .keyBy(k -> k.f1.getSinkTable())
            .addSink(GmallSinkUtil.getHbaseSink());
    }

    /** Writes fact rows to Kafka; the target topic comes from the TableProcess config. */
    private void sendToKafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream) {
        kafkaStream.addSink(GmallSinkUtil.getKafkaSink());
    }

    /**
     * Connects the data stream with the broadcast config stream and routes each
     * record by the key "table:type": a matching TableProcess entry decides
     * whether the record goes to Kafka (main output) or HBase (side output).
     * Records with no matching config entry are silently dropped.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> dynamicSplitStream(SingleOutputStreamOperator<TableProcess> tableProcessStream, SingleOutputStreamOperator<JSONObject> etledStream) {
        // The sink of a record is determined by (table name, operation type);
        // that pair maps to exactly one TableProcess entry in broadcast state.
        MapStateDescriptor<String, TableProcess> tableProcessState = new MapStateDescriptor<>("tableProcessState", String.class, TableProcess.class);

        BroadcastStream<TableProcess> tableProcessBroadcastStream = tableProcessStream.broadcast(tableProcessState);

        return etledStream
            .connect(tableProcessBroadcastStream)
            .process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(JSONObject value, ReadOnlyContext ctx, Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    ReadOnlyBroadcastState<String, TableProcess> broadcastState = ctx.getBroadcastState(tableProcessState);
                    // Bootstrap events arrive as e.g. "bootstrap-insert"; strip the
                    // prefix so they match the configured operate_type.
                    // FIX: replace() instead of replaceAll() — "bootstrap-" is a
                    // literal, not a regular expression.
                    String key = value.getString("table") + ":" + value.getString("type").replace("bootstrap-", "");
                    TableProcess tableProcess = broadcastState.get(key);
                    if (tableProcess != null) {
                        // Only the "data" payload is forwarded downstream
                        JSONObject data = value.getJSONObject("data");
                        // Keep only the columns listed in the config's sink_columns
                        filterDataStreamNeededColumn(data, tableProcess);
                        // Split: dimensions -> HBase side output, facts -> Kafka main output
                        if (TableProcess.SINK_TYPE_HBASE.equals(tableProcess.getSinkType())) {
                            ctx.output(hbaseTag, Tuple2.of(data, tableProcess));
                        } else if (TableProcess.SINK_TYPE_KAFKA.equals(tableProcess.getSinkType())) {
                            out.collect(Tuple2.of(data, tableProcess));
                        }
                    }
                }

                /** Removes every field of {@code data} that is not listed in sink_columns. */
                private void filterDataStreamNeededColumn(JSONObject data, TableProcess tableProcess) {
                    List<String> neededColumns = Arrays.asList(tableProcess.getSinkColumns().split(","));
                    // removeIf mutates the backing map safely during iteration.
                    // Note: "data" was already unwrapped by the caller, so iterating
                    // data.getJSONObject("data") here would be wrong.
                    data.keySet().removeIf(column -> !neededColumns.contains(column));
                }

                @Override
                public void processBroadcastElement(TableProcess value, Context ctx, Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // Upsert the routing rule into broadcast state.
                    // NOTE(review): deletes from the config table never reach here
                    // (the retract stream filters them out in readTableProcess), so
                    // removed rules remain in state until the job restarts.
                    String key = value.getSourceTable() + ":" + value.getOperateType();
                    ctx.getBroadcastState(tableProcessState).put(key, value);
                }
            });
    }

    /**
     * Reads the table_process routing config from MySQL with the mysql-cdc
     * connector and converts it into a stream of TableProcess beans. Column
     * aliases in the query match the bean's field names; only upsert rows
     * (retract flag == true) are kept.
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {
        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);
        tEnv.executeSql("CREATE TABLE `table_process` (\n" +
                            "  `source_table` string,\n" +
                            "  `operate_type` string ,\n" +
                            "  `sink_type` string ,\n" +
                            "  `sink_table` string ,\n" +
                            "  `sink_columns` string ,\n" +
                            "  `sink_pk` string ,\n" +
                            "  `sink_extend` string ,\n" +
                            "  PRIMARY KEY (`source_table`,`operate_type`) not enforced" +
                            ")with(" +
                            " 'connector' = 'mysql-cdc',\n" +
                            " 'hostname' = 'hadoop162',\n" +
                            " 'port' = '3306',\n" +
                            " 'username' = 'root',\n" +
                            " 'password' = 'aaaaaa',\n" +
                            " 'database-name' = 'gmall2021_realtime',\n" +
                            " 'table-name' = 'table_process', " +
                            " 'debezium.snapshot.mode'='initial' " +
                            ")");
        Table table = tEnv.sqlQuery("select " +
                                        " source_table sourceTable, " +
                                        " sink_type sinkType, " +
                                        " operate_type operateType, " +
                                        " sink_table sinkTable, " +
                                        " sink_columns sinkColumns," +
                                        " sink_pk sinkPk, " +
                                        " sink_extend sinkExtend " +
                                        "from table_process");
        return tEnv
            .toRetractStream(table, TableProcess.class)
            .filter(t -> t.f0)   // keep only add/upsert rows; drop retractions
            .map(t -> t.f1);
    }

    /**
     * Parses raw change-log strings to JSON and keeps only well-formed
     * insert/update events (including bootstrap variants) that carry a
     * non-trivial "data" payload.
     */
    private SingleOutputStreamOperator<JSONObject> etlDataStream(DataStreamSource<String> dataStreamSource) {
        return dataStreamSource
            .flatMap(new FlatMapFunction<String, JSONObject>() {
                @Override
                public void flatMap(String value, Collector<JSONObject> out) {
                    try {
                        // FIX: parsing used to run inside map(), so one malformed
                        // record would fail the whole job; drop bad records instead.
                        out.collect(JSON.parseObject(value));
                    } catch (Exception ignored) {
                        // best-effort ETL: skip records that are not valid JSON
                    }
                }
            })
            .filter(obj ->
                        obj.getString("database") != null
                            && obj.getString("table") != null
                            && obj.getString("type") != null
                            // "bootstrap-insert" also matches contains("insert")
                            && (obj.getString("type").contains("update") || obj.getString("type").contains("insert"))
                            && obj.getJSONObject("data") != null
                            // skip near-empty payloads such as "{}" or "null"
                            && obj.getString("data").length() > 4
            );
    }
}

