package com.atguigu.realtime.app.dwd;

import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;
import com.atguigu.realtime.app.BaseApp;
import com.atguigu.realtime.bean.TableProcess;
import com.atguigu.realtime.common.Constant;
import com.atguigu.realtime.util.FlinkSinkUtil;
import org.apache.flink.api.common.state.BroadcastState;
import org.apache.flink.api.common.state.MapStateDescriptor;
import org.apache.flink.api.common.state.ReadOnlyBroadcastState;
import org.apache.flink.api.java.tuple.Tuple2;
import org.apache.flink.streaming.api.datastream.*;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.ProcessFunction;
import org.apache.flink.streaming.api.functions.co.BroadcastProcessFunction;
import org.apache.flink.table.api.Table;
import org.apache.flink.table.api.bridge.java.StreamTableEnvironment;
import org.apache.flink.util.Collector;
import org.apache.flink.util.OutputTag;

import java.util.Arrays;
import java.util.List;

/**
 * Created by RUI on 2021/6/25 21:15
 */
public class DwdDbApp extends BaseApp {

    /**
     * Side-output tag routing dimension-table records to HBase.
     * Must be created via an anonymous subclass ({@code {}}) so Flink can capture the
     * generic {@code Tuple2} type information. Declared once and shared between the
     * emitting {@code ProcessFunction} and {@code getSideOutput} — the original code
     * re-instantiated the tag for every element and used two distinct instances.
     */
    private static final OutputTag<Tuple2<JSONObject, TableProcess>> HBASE_TAG =
        new OutputTag<Tuple2<JSONObject, TableProcess>>("hbase") {};

    public static void main(String[] args) {
        // port 2002, parallelism 1, checkpoint dir / consumer group "DwdDbApp", source topic ods_db
        new DwdDbApp().init(2002, 1, "DwdDbApp", "DwdDbApp", Constant.TOPIC_ODS_DB);
    }

    /**
     * Pipeline entry point.
     * <ol>
     *   <li>Read the {@code table_process} configuration table via mysql-cdc.</li>
     *   <li>ETL the ods_db change-log stream (drop malformed/irrelevant records).</li>
     *   <li>Broadcast-connect the config stream with the data stream.</li>
     *   <li>Dynamically split: fact tables stay on the main stream, dimension tables
     *       go to a side output.</li>
     *   <li>Write facts to Kafka and dimensions to HBase.</li>
     * </ol>
     */
    @Override
    public void run(StreamExecutionEnvironment env, DataStreamSource<String> sourceStream) {
        // 1. Read the configuration-table stream.
        SingleOutputStreamOperator<TableProcess> tpStream = readTableProcess(env);

        // 2. sourceStream carries the business-table change log; apply basic ETL.
        SingleOutputStreamOperator<JSONObject> etledDataStream = etlDataStream(sourceStream);

        // 3. Connect the two streams (config is broadcast).
        //    NOTE: renamed local from "connectStreams" — it shadowed the method name.
        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream =
            connectStreams(tpStream, etledDataStream);

        // 4. Dynamic split: f0 = Kafka-bound facts, f1 = HBase-bound dimensions.
        Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicStream =
            dynamicStream(connectedStream);

        // 5. Fact-table data goes to Kafka.
        send2Kafka(dynamicStream.f0);
        // 6. Dimension-table data goes to HBase.
        send2Hbase(dynamicStream.f1);
    }

    /**
     * Writes dimension records to HBase (Phoenix), keyed by target table so writes for
     * the same table land on the same subtask (better batching / write efficiency).
     */
    private void send2Hbase(DataStream<Tuple2<JSONObject, TableProcess>> stream) {
        stream
            .keyBy(t -> t.f1.getSinkTable())
            .addSink(FlinkSinkUtil.getHBaseSink());
    }

    /** Writes fact records to Kafka; the sink resolves the topic from each TableProcess. */
    private void send2Kafka(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> stream) {
        stream.addSink(FlinkSinkUtil.getKafkaSink());
    }

    /**
     * Dynamic split: Kafka-bound (fact) records stay on the main stream, HBase-bound
     * (dimension) records go to the {@link #HBASE_TAG} side output. Records whose
     * sink_type is neither kafka nor hbase are dropped.
     *
     * @param connectedStream joined (data, config) pairs
     * @return Tuple2 of (main/Kafka stream, side/HBase stream)
     */
    private Tuple2<SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>>, DataStream<Tuple2<JSONObject, TableProcess>>> dynamicStream(SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectedStream) {

        SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> kafkaStream = connectedStream
            .process(new ProcessFunction<Tuple2<JSONObject, TableProcess>, Tuple2<JSONObject, TableProcess>>() {
                @Override
                public void processElement(Tuple2<JSONObject, TableProcess> t2, Context ctx, Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                    // Only the "data" payload of the change-log record is forwarded downstream.
                    JSONObject data = t2.f0.getJSONObject("data");
                    TableProcess tp = t2.f1;

                    // 1. Keep only the columns listed in the config's sink_columns.
                    filterColumns(data, tp);

                    if (TableProcess.SINK_TYPE_KAFKA.equals(tp.getSinkType())) {
                        out.collect(Tuple2.of(data, tp));           // fact -> main stream
                    } else if (TableProcess.SINK_TYPE_HBASE.equals(tp.getSinkType())) {
                        ctx.output(HBASE_TAG, Tuple2.of(data, tp)); // dimension -> side output
                    }
                }

                /** Removes from {@code data} every key not present in the config's sink_columns CSV. */
                private void filterColumns(JSONObject data, TableProcess tp) {
                    List<String> wanted = Arrays.asList(tp.getSinkColumns().split(","));
                    data.keySet().removeIf(col -> !wanted.contains(col));
                }
            });

        DataStream<Tuple2<JSONObject, TableProcess>> hbaseStream = kafkaStream.getSideOutput(HBASE_TAG);

        return Tuple2.of(kafkaStream, hbaseStream);
    }

    /**
     * Broadcasts the configuration stream and connects it with the data stream.
     * Each data record is matched against broadcast state by key
     * {@code "<table>:<type>"}; unmatched records are dropped.
     */
    private SingleOutputStreamOperator<Tuple2<JSONObject, TableProcess>> connectStreams(SingleOutputStreamOperator<TableProcess> tpStream, SingleOutputStreamOperator<JSONObject> etledDataStream) {
        MapStateDescriptor<String, TableProcess> tpStateDesc = new MapStateDescriptor<>("tpState", String.class, TableProcess.class);
        // 1. Turn the config stream into a broadcast stream.
        BroadcastStream<TableProcess> broadcast = tpStream.broadcast(tpStateDesc);

        // 2. Connect the data stream with the broadcast stream.
        BroadcastConnectedStream<JSONObject, TableProcess> connectedStream = etledDataStream.connect(broadcast);

        return connectedStream.process(new BroadcastProcessFunction<JSONObject, TableProcess, Tuple2<JSONObject, TableProcess>>() {

            @Override
            public void processElement(JSONObject value, ReadOnlyContext ctx, Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                // Read-only view of the broadcast state on the data side.
                ReadOnlyBroadcastState<String, TableProcess> broadcastState = ctx.getBroadcastState(tpStateDesc);

                /* Incoming record shape (Maxwell change log):
                    {
                        "database":"gmall2021",
                        "table":"order_info",
                        "type":"update",
                        "ts":1624610711,
                        "xid":19672,
                        "xoffset":4210,
                        "data":Object{...},
                        "old":{
                            "order_status":"1001",
                            "operate_time":null
                        }
                    }
                 */

                // Maxwell bootstrap emits type "bootstrap-insert"; normalize it to "insert"
                // so it matches the config's operate_type. (Literal replace — no regex needed.)
                String key = value.getString("table") + ":" + value.getString("type").replace("bootstrap-", "");
                TableProcess tableProcess = broadcastState.get(key);

                // A table with no config entry does not belong in the dwd layer — drop it.
                if (null != tableProcess) {
                    out.collect(Tuple2.of(value, tableProcess));
                } else {
                    // No matching config: either genuinely unconfigured, or the config
                    // record has not arrived yet. A buffered-retry approach (stash the
                    // record and re-check later) could recover the latter case.
                }

            }

            @Override
            public void processBroadcastElement(TableProcess tp, Context ctx, Collector<Tuple2<JSONObject, TableProcess>> out) throws Exception {
                // Writable broadcast state on the config side.
                BroadcastState<String, TableProcess> broadcastState = ctx.getBroadcastState(tpStateDesc);

                // Key format must mirror the lookup in processElement: "<table>:<type>".
                String key = tp.getSourceTable() + ":" + tp.getOperateType();

                broadcastState.put(key, tp);
            }
        });

    }

    /**
     * Basic ETL on the raw change-log stream: parse JSON and drop records that are
     * missing required fields, are not insert/bootstrap-insert/update, or carry an
     * empty data payload ("{}" serializes to length 2).
     */
    private SingleOutputStreamOperator<JSONObject> etlDataStream(DataStreamSource<String> sourceStream) {
        return sourceStream
            .map(JSON::parseObject)
            .filter(obj ->
                obj.getString("database") != null
                    && obj.getString("table") != null
                    && obj.getString("type") != null
                    // contains("insert") also accepts Maxwell's "bootstrap-insert";
                    // use getString consistently instead of raw Object.equals.
                    && (obj.getString("type").contains("insert") || "update".equals(obj.getString("type")))
                    && obj.getString("data") != null
                    && obj.getString("data").length() > 2
            );
    }

    /**
     * Reads the {@code table_process} configuration table as a changelog stream via
     * the mysql-cdc connector, keeping only insert/update (f0 == true) messages and
     * mapping them to {@link TableProcess} beans.
     */
    private SingleOutputStreamOperator<TableProcess> readTableProcess(StreamExecutionEnvironment env) {

        StreamTableEnvironment tEnv = StreamTableEnvironment.create(env);

        // Dynamic table bound to the MySQL config table (snapshot + ongoing binlog).
        tEnv.executeSql("CREATE TABLE `table_process` (\n" +
            "  `source_table` string,\n" +
            "  `operate_type` string ,\n" +
            "  `sink_type` string ,\n" +
            "  `sink_table` string ,\n" +
            "  `sink_columns` string ,\n" +
            "  `sink_pk` string ,\n" +
            "  `sink_extend` string ,\n" +
            "  PRIMARY KEY (`source_table`,`operate_type`) not enforced" +
            ")with(" +
            " 'connector' = 'mysql-cdc',\n" +
            " 'hostname' = 'hadoop162',\n" +
            " 'port' = '3306',\n" +
            " 'username' = 'root',\n" +
            " 'password' = 'aaaaaa',\n" +
            " 'database-name' = 'gmall2021_realtime',\n" +
            " 'table-name' = 'table_.*', " +
            " 'debezium.snapshot.mode'='initial' " +
            ")");

        // Column aliases must match the TableProcess bean's property names.
        Table tpTable = tEnv
            .sqlQuery("select " +
                " source_table sourceTable, " +
                " sink_type sinkType, " +
                " operate_type operateType, " +
                " sink_table sinkTable, " +
                " sink_columns sinkColumns," +
                " sink_pk sinkPk, " +
                " sink_extend sinkExtend " +
                "from table_process");

        return tEnv
            .toRetractStream(tpTable, TableProcess.class)
            .filter(t -> t.f0)   // keep add-messages only; drop retractions
            .map(t -> t.f1);
    }
}
